/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>
#include <linux/align.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
				 NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

/* indexed by enum board_idx */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
	ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
};

static struct workqueue_struct *bnxt_pf_wq;

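/* Return true if the board index identifies a virtual function device. */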
static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db) \
	writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx) \
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx) \
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), \
		    (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx) \
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx) \
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
		    (db)->doorbell)

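/* Ring the notification queue (NQ) doorbell.  P5 chips use the 64-bit
 * doorbell format; older chips use the legacy completion ring doorbell.
 */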
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
			    RING_CMP(idx), db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

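/* Write the TX producer index to the ring doorbell and clear any
 * deferred doorbell kick.
 */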
static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			     u16 prod)
{
	bnxt_db_write(bp, &txr->tx_db, prod);
	txr->kick_pending = 0;
}

static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
					  struct bnxt_tx_ring_info *txr,
					  struct netdev_queue *txq)
{
	netif_tx_stop_queue(txq);

	/* netif_tx_stop_queue() must be done before checking
	 * tx index in bnxt_tx_avail() below, because in
	 * bnxt_tx_int(), we update tx index before checking for
	 * netif_tx_queue_stopped().
	 */
	smp_mb();
	if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
		netif_tx_wake_queue(txq);
		return false;
	}

	return true;
}

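/* Main transmit handler (ndo_start_xmit).  Small packets may be written
 * inline through the TX push buffer; all others are DMA-mapped and posted
 * as long TX BDs.
 */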
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;
	__le32 lflags = 0;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		dev_core_stats_tx_dropped_inc(dev);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		/* We must have raced with NAPI cleanup */
		if (net_ratelimit() && txr->kick_pending)
			netif_warn(bp, tx_err, dev,
				   "bnxt: ring busy w/ flush pending!\n");
		if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
			return NETDEV_TX_BUSY;
	}

	if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
		goto tx_free;

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

		if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
		    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
			if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
					    &ptp->tx_hdr_off)) {
				if (vlan_tag_flags)
					ptp->tx_hdr_off += VLAN_HLEN;
				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			} else {
				atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}
	}

	if (unlikely(skb->no_fcs))
		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
	    !lflags) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
				cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad))
			/* SKB already freed. */
			goto tx_kick_pending;
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
		goto tx_free;

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = lflags;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_tcp_all_headers(skb);
		else
			hdr_len = skb_tcp_all_headers(skb);

		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags |=
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
		cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		bnxt_txr_db_kick(bp, txr, prod);
	else
		txr->kick_pending = 1;

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push)
			bnxt_txr_db_kick(bp, txr, prod);

		bnxt_txr_netif_try_stop_queue(bp, txr, txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	if (BNXT_TX_PTP_IS_SET(lflags))
		atomic_inc(&bp->ptp_cfg->tx_avail);

	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
	}

tx_free:
	dev_kfree_skb_any(skb);
tx_kick_pending:
	if (txr->kick_pending)
		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
	dev_core_stats_tx_dropped_inc(dev);
	return NETDEV_TX_OK;
}

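/* Reclaim completed TX descriptors: unmap DMA buffers, free the skbs and
 * re-wake the queue if it was stopped and enough descriptors are free again.
 */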
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		tx_bytes += skb->len;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), DMA_TO_DEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				DMA_TO_DEVICE);
		}
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			if (bp->flags & BNXT_FLAG_CHIP_P5) {
				/* PTP worker takes ownership of the skb */
				if (!bnxt_get_tx_ts_p5(bp, skb))
					skb = NULL;
				else
					atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}

next_tx_int:
		cons = NEXT_TX(cons);

		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
	    READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
		netif_tx_wake_queue(txq);
}

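/* Allocate an RX buffer page from the ring's page pool and DMA-map it. */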
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = page_pool_dev_alloc_pages(rxr->page_pool);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	return page;
}

static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	if (gfp == GFP_ATOMIC)
		data = napi_alloc_frag(bp->rx_buf_size);
	else
		data = netdev_alloc_frag(bp->rx_buf_size);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		skb_free_frag(data);
		data = NULL;
	}
	return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page =
			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

		if (!page)
			return -ENOMEM;

		mapping += bp->rx_dma_offset;
		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

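/* Find the next free slot in the RX aggregation buffer bitmap, wrapping
 * back to the start if the end of the bitmap is reached.
 */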
static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (BNXT_RX_PAGE_MODE(bp)) {
		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

		if (!page)
			return -ENOMEM;

	} else {
		if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
			page = rxr->rx_page;
			if (!page) {
				page = alloc_page(gfp);
				if (!page)
					return -ENOMEM;
				rxr->rx_page = page;
				rxr->rx_page_offset = 0;
			}
			offset = rxr->rx_page_offset;
			rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
			if (rxr->rx_page_offset == PAGE_SIZE)
				rxr->rx_page = NULL;
			else
				get_page(page);
		} else {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
		}

		mapping = dma_map_page_attrs(&pdev->dev, page, offset,
					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
					     DMA_ATTR_WEAK_ORDERING);
		if (dma_mapping_error(&pdev->dev, mapping)) {
			__free_page(page);
			return -EIO;
		}
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
{
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 cons, void *data, u8 *data_ptr,
					      dma_addr_t dma_addr,
					      unsigned int offset_and_len)
{
	unsigned int len = offset_and_len & 0xffff;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);
	skb = build_skb(page_address(page), PAGE_SIZE);
	if (!skb) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	skb_mark_for_recycle(skb);
	skb_reserve(skb, bp->rx_dma_offset);
	__skb_put(skb, len);

	return skb;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	skb_frag_t *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}

	skb_mark_for_recycle(skb);
	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, bp->rx_buf_size);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		skb_free_frag(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
			       struct bnxt_cp_ring_info *cpr,
			       struct skb_shared_info *shinfo,
			       u16 idx, u32 agg_bufs, bool tpa,
			       struct xdp_buff *xdp)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i, total_frag_len = 0;
	bool p5_tpa = false;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		skb_frag_t *frag = &shinfo->frags[i];
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_frag_off_set(frag, cons_rx_buf->offset);
		skb_frag_size_set(frag, frag_len);
		__skb_frag_set_page(frag, cons_rx_buf->page);
		shinfo->nr_frags = i + 1;
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (xdp && page_is_pfmemalloc(page))
			xdp_buff_set_frag_pfmemalloc(xdp);

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			unsigned int nr_frags;

			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return 0;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     bp->rx_dir,
				     DMA_ATTR_WEAK_ORDERING);

		total_frag_len += frag_len;
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return total_frag_len;
}

static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
					     struct bnxt_cp_ring_info *cpr,
					     struct sk_buff *skb, u16 idx,
					     u32 agg_bufs, bool tpa)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	u32 total_frag_len = 0;

	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
					     agg_bufs, tpa, NULL);
	if (!total_frag_len) {
		dev_kfree_skb(skb);
		return NULL;
	}

	skb->data_len += total_frag_len;
	skb->len += total_frag_len;
	skb->truesize += PAGE_SIZE * agg_bufs;
	return skb;
}

static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 struct xdp_buff *xdp, u16 idx,
				 u32 agg_bufs, bool tpa)
{
	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
	u32 total_frag_len = 0;

	if (!xdp_buff_has_frags(xdp))
		shinfo->nr_frags = 0;

	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
					     idx, agg_bufs, tpa, xdp);
	if (total_frag_len) {
		xdp_buff_set_frags_flag(xdp);
		shinfo->nr_frags = agg_bufs;
		shinfo->xdp_frags_size = total_frag_len;
	}
	return total_frag_len;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		if (bp->flags & BNXT_FLAG_CHIP_P5)
			return 0;

		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
		return;

	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		else
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}

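/* Handle a TPA_START completion: save the current RX buffer in the
 * per-aggregation tpa_info and recycle the ring slot so aggregation can
 * continue until the matching TPA_END completion arrives.
 */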
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_START_AGG_ID_P5(tpa_start);
		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
	} else {
		agg_id = TPA_START_AGG_ID(tpa_start);
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

#ifdef CONFIG_INET
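/* For tunneled packets, set the UDP tunnel GSO type on the aggregated skb
 * based on whether the outer UDP header carries a checksum.
 */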
1416static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1417{
1418 struct udphdr *uh = NULL;
1419
1420 if (ip_proto == htons(ETH_P_IP)) {
1421 struct iphdr *iph = (struct iphdr *)skb->data;
1422
1423 if (iph->protocol == IPPROTO_UDP)
1424 uh = (struct udphdr *)(iph + 1);
1425 } else {
1426 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1427
1428 if (iph->nexthdr == IPPROTO_UDP)
1429 uh = (struct udphdr *)(iph + 1);
1430 }
1431 if (uh) {
1432 if (uh->check)
1433 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1434 else
1435 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1436 }
1437}
1438#endif
1439
94758f8d
MC
1440static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1441 int payload_off, int tcp_ts,
1442 struct sk_buff *skb)
1443{
1444#ifdef CONFIG_INET
1445 struct tcphdr *th;
1446 int len, nw_off;
1447 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1448 u32 hdr_info = tpa_info->hdr_info;
1449 bool loopback = false;
1450
1451 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1452 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1453 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1454
1455 /* If the packet is an internal loopback packet, the offsets will
1456 * have an extra 4 bytes.
1457 */
1458 if (inner_mac_off == 4) {
1459 loopback = true;
1460 } else if (inner_mac_off > 4) {
1461 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1462 ETH_HLEN - 2));
1463
1464 /* We only support inner iPv4/ipv6. If we don't see the
1465 * correct protocol ID, it must be a loopback packet where
1466 * the offsets are off by 4.
1467 */
09a7636a 1468 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
94758f8d
MC
1469 loopback = true;
1470 }
1471 if (loopback) {
1472 /* internal loopback packet, subtract all offsets by 4 */
1473 inner_ip_off -= 4;
1474 inner_mac_off -= 4;
1475 outer_ip_off -= 4;
1476 }
1477
1478 nw_off = inner_ip_off - ETH_HLEN;
1479 skb_set_network_header(skb, nw_off);
1480 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1481 struct ipv6hdr *iph = ipv6_hdr(skb);
1482
1483 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1484 len = skb->len - skb_transport_offset(skb);
1485 th = tcp_hdr(skb);
1486 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1487 } else {
1488 struct iphdr *iph = ip_hdr(skb);
1489
1490 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1491 len = skb->len - skb_transport_offset(skb);
1492 th = tcp_hdr(skb);
1493 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1494 }
1495
1496 if (inner_mac_off) { /* tunnel */
94758f8d
MC
1497 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1498 ETH_HLEN - 2));
1499
bee5a188 1500 bnxt_gro_tunnel(skb, proto);
94758f8d
MC
1501 }
1502#endif
1503 return skb;
1504}
1505
67912c36
MC
1506static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1507 int payload_off, int tcp_ts,
1508 struct sk_buff *skb)
1509{
1510#ifdef CONFIG_INET
1511 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1512 u32 hdr_info = tpa_info->hdr_info;
1513 int iphdr_len, nw_off;
1514
1515 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1516 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1517 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1518
1519 nw_off = inner_ip_off - ETH_HLEN;
1520 skb_set_network_header(skb, nw_off);
1521 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1522 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1523 skb_set_transport_header(skb, nw_off + iphdr_len);
1524
1525 if (inner_mac_off) { /* tunnel */
1526 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1527 ETH_HLEN - 2));
1528
1529 bnxt_gro_tunnel(skb, proto);
1530 }
1531#endif
1532 return skb;
1533}
1534
c0c050c5
MC
1535#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1536#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1537
309369c9
MC
1538static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1539 int payload_off, int tcp_ts,
c0c050c5
MC
1540 struct sk_buff *skb)
1541{
d1611c3a 1542#ifdef CONFIG_INET
c0c050c5 1543 struct tcphdr *th;
719ca811 1544 int len, nw_off, tcp_opt_len = 0;
27e24189 1545
309369c9 1546 if (tcp_ts)
c0c050c5
MC
1547 tcp_opt_len = 12;
1548
c0c050c5
MC
1549 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1550 struct iphdr *iph;
1551
1552 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1553 ETH_HLEN;
1554 skb_set_network_header(skb, nw_off);
1555 iph = ip_hdr(skb);
1556 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1557 len = skb->len - skb_transport_offset(skb);
1558 th = tcp_hdr(skb);
1559 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1560 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1561 struct ipv6hdr *iph;
1562
1563 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1564 ETH_HLEN;
1565 skb_set_network_header(skb, nw_off);
1566 iph = ipv6_hdr(skb);
1567 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1568 len = skb->len - skb_transport_offset(skb);
1569 th = tcp_hdr(skb);
1570 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1571 } else {
1572 dev_kfree_skb_any(skb);
1573 return NULL;
1574 }
c0c050c5 1575
bee5a188
MC
1576 if (nw_off) /* tunnel */
1577 bnxt_gro_tunnel(skb, skb->protocol);
c0c050c5
MC
1578#endif
1579 return skb;
1580}
1581
309369c9
MC
1582static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1583 struct bnxt_tpa_info *tpa_info,
1584 struct rx_tpa_end_cmp *tpa_end,
1585 struct rx_tpa_end_cmp_ext *tpa_end1,
1586 struct sk_buff *skb)
1587{
1588#ifdef CONFIG_INET
1589 int payload_off;
1590 u16 segs;
1591
1592 segs = TPA_END_TPA_SEGS(tpa_end);
1593 if (segs == 1)
1594 return skb;
1595
1596 NAPI_GRO_CB(skb)->count = segs;
1597 skb_shinfo(skb)->gso_size =
1598 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1599 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
bfcd8d79
MC
1600 if (bp->flags & BNXT_FLAG_CHIP_P5)
1601 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1602 else
1603 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
309369c9 1604 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
5910906c
MC
1605 if (likely(skb))
1606 tcp_gro_complete(skb);
309369c9
MC
1607#endif
1608 return skb;
1609}
1610
ee5c7fb3
SP
 1611/* Given the cfa_code of a received packet, determine which
1612 * netdev (vf-rep or PF) the packet is destined to.
1613 */
1614static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1615{
1616 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1617
 1618 /* if vf-rep dev is NULL, the packet must belong to the PF */
1619 return dev ? dev : bp->dev;
1620}
1621
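/* Complete a TPA (hardware GRO/LRO) aggregation: look up the aggregation
 * context (by hardware agg ID on P5 chips, by completion index otherwise),
 * build the skb, attach any aggregation pages, and apply RSS hash, VLAN
 * and checksum metadata before the optional GRO fix-up in bnxt_gro_skb().
 */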
c0c050c5 1622static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
e44758b7 1623 struct bnxt_cp_ring_info *cpr,
c0c050c5
MC
1624 u32 *raw_cons,
1625 struct rx_tpa_end_cmp *tpa_end,
1626 struct rx_tpa_end_cmp_ext *tpa_end1,
4e5dbbda 1627 u8 *event)
c0c050c5 1628{
e44758b7 1629 struct bnxt_napi *bnapi = cpr->bnapi;
b6ab4b01 1630 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
6bb19474 1631 u8 *data_ptr, agg_bufs;
c0c050c5
MC
1632 unsigned int len;
1633 struct bnxt_tpa_info *tpa_info;
1634 dma_addr_t mapping;
1635 struct sk_buff *skb;
bfcd8d79 1636 u16 idx = 0, agg_id;
6bb19474 1637 void *data;
bfcd8d79 1638 bool gro;
c0c050c5 1639
fa7e2812 1640 if (unlikely(bnapi->in_reset)) {
e44758b7 1641 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
fa7e2812
MC
1642
1643 if (rc < 0)
1644 return ERR_PTR(-EBUSY);
1645 return NULL;
1646 }
1647
bfcd8d79
MC
1648 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1649 agg_id = TPA_END_AGG_ID_P5(tpa_end);
ec4d8e7c 1650 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
bfcd8d79
MC
1651 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1652 tpa_info = &rxr->rx_tpa[agg_id];
1653 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1654 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1655 agg_bufs, tpa_info->agg_count);
1656 agg_bufs = tpa_info->agg_count;
1657 }
1658 tpa_info->agg_count = 0;
1659 *event |= BNXT_AGG_EVENT;
ec4d8e7c 1660 bnxt_free_agg_idx(rxr, agg_id);
bfcd8d79
MC
1661 idx = agg_id;
1662 gro = !!(bp->flags & BNXT_FLAG_GRO);
1663 } else {
1664 agg_id = TPA_END_AGG_ID(tpa_end);
1665 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1666 tpa_info = &rxr->rx_tpa[agg_id];
1667 idx = RING_CMP(*raw_cons);
1668 if (agg_bufs) {
1669 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1670 return ERR_PTR(-EBUSY);
1671
1672 *event |= BNXT_AGG_EVENT;
1673 idx = NEXT_CMP(idx);
1674 }
1675 gro = !!TPA_END_GRO(tpa_end);
1676 }
c0c050c5 1677 data = tpa_info->data;
6bb19474
MC
1678 data_ptr = tpa_info->data_ptr;
1679 prefetch(data_ptr);
c0c050c5
MC
1680 len = tpa_info->len;
1681 mapping = tpa_info->mapping;
1682
69c149e2 1683 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
4a228a3a 1684 bnxt_abort_tpa(cpr, idx, agg_bufs);
69c149e2
MC
1685 if (agg_bufs > MAX_SKB_FRAGS)
1686 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1687 agg_bufs, (int)MAX_SKB_FRAGS);
c0c050c5
MC
1688 return NULL;
1689 }
1690
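/* Small payloads are copied into a new skb so the original buffer stays
 * with the aggregation context; larger payloads take over the buffer and
 * a replacement fragment is allocated, since the context must always own
 * a mapped buffer.
 */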
1691 if (len <= bp->rx_copy_thresh) {
6bb19474 1692 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
c0c050c5 1693 if (!skb) {
4a228a3a 1694 bnxt_abort_tpa(cpr, idx, agg_bufs);
907fd4a2 1695 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1696 return NULL;
1697 }
1698 } else {
1699 u8 *new_data;
1700 dma_addr_t new_mapping;
1701
720908e5 1702 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
c0c050c5 1703 if (!new_data) {
4a228a3a 1704 bnxt_abort_tpa(cpr, idx, agg_bufs);
907fd4a2 1705 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1706 return NULL;
1707 }
1708
1709 tpa_info->data = new_data;
b3dba77c 1710 tpa_info->data_ptr = new_data + bp->rx_offset;
c0c050c5
MC
1711 tpa_info->mapping = new_mapping;
1712
720908e5 1713 skb = build_skb(data, bp->rx_buf_size);
c519fe9a
SN
1714 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1715 bp->rx_buf_use_size, bp->rx_dir,
1716 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
1717
1718 if (!skb) {
720908e5 1719 skb_free_frag(data);
4a228a3a 1720 bnxt_abort_tpa(cpr, idx, agg_bufs);
907fd4a2 1721 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1722 return NULL;
1723 }
b3dba77c 1724 skb_reserve(skb, bp->rx_offset);
c0c050c5
MC
1725 skb_put(skb, len);
1726 }
1727
1728 if (agg_bufs) {
23e4c046 1729 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
c0c050c5
MC
1730 if (!skb) {
 1731 /* Page reuse already handled by bnxt_rx_agg_pages_skb(). */
907fd4a2 1732 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1733 return NULL;
1734 }
1735 }
ee5c7fb3
SP
1736
1737 skb->protocol =
1738 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
c0c050c5
MC
1739
1740 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1741 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1742
8852ddb4 1743 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
a196e96b 1744 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
96bdd4b9
MC
1745 __be16 vlan_proto = htons(tpa_info->metadata >>
1746 RX_CMP_FLAGS2_METADATA_TPID_SFT);
ed7bc602 1747 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
c0c050c5 1748
96bdd4b9
MC
1749 if (eth_type_vlan(vlan_proto)) {
1750 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1751 } else {
1752 dev_kfree_skb(skb);
1753 return NULL;
1754 }
c0c050c5
MC
1755 }
1756
1757 skb_checksum_none_assert(skb);
1758 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1759 skb->ip_summed = CHECKSUM_UNNECESSARY;
1760 skb->csum_level =
1761 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1762 }
1763
bfcd8d79 1764 if (gro)
309369c9 1765 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
c0c050c5
MC
1766
1767 return skb;
1768}
1769
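/* TPA aggregation buffer completions (P5 chips) arrive separately from the
 * TPA end completion; queue them in the aggregation context's agg array
 * until bnxt_tpa_end() consumes them.
 */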
8fe88ce7
MC
1770static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1771 struct rx_agg_cmp *rx_agg)
1772{
1773 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1774 struct bnxt_tpa_info *tpa_info;
1775
ec4d8e7c 1776 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
8fe88ce7
MC
1777 tpa_info = &rxr->rx_tpa[agg_id];
1778 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1779 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1780}
1781
ee5c7fb3
SP
1782static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1783 struct sk_buff *skb)
1784{
1785 if (skb->dev != bp->dev) {
1786 /* this packet belongs to a vf-rep */
1787 bnxt_vf_rep_rx(bp, skb);
1788 return;
1789 }
1790 skb_record_rx_queue(skb, bnapi->index);
1791 napi_gro_receive(&bnapi->napi, skb);
1792}
1793
c0c050c5
MC
1794/* returns the following:
1795 * 1 - 1 packet successfully received
1796 * 0 - successful TPA_START, packet not completed yet
1797 * -EBUSY - completion ring does not have all the agg buffers yet
1798 * -ENOMEM - packet aborted due to out of memory
1799 * -EIO - packet aborted due to hw error indicated in BD
1800 */
e44758b7
MC
1801static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1802 u32 *raw_cons, u8 *event)
c0c050c5 1803{
e44758b7 1804 struct bnxt_napi *bnapi = cpr->bnapi;
b6ab4b01 1805 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
1806 struct net_device *dev = bp->dev;
1807 struct rx_cmp *rxcmp;
1808 struct rx_cmp_ext *rxcmp1;
1809 u32 tmp_raw_cons = *raw_cons;
ee5c7fb3 1810 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
c0c050c5
MC
1811 struct bnxt_sw_rx_bd *rx_buf;
1812 unsigned int len;
6bb19474 1813 u8 *data_ptr, agg_bufs, cmp_type;
ee536dcb 1814 bool xdp_active = false;
c0c050c5
MC
1815 dma_addr_t dma_addr;
1816 struct sk_buff *skb;
b231c3f3 1817 struct xdp_buff xdp;
7f5515d1 1818 u32 flags, misc;
6bb19474 1819 void *data;
c0c050c5
MC
1820 int rc = 0;
1821
1822 rxcmp = (struct rx_cmp *)
1823 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1824
8fe88ce7
MC
1825 cmp_type = RX_CMP_TYPE(rxcmp);
1826
1827 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1828 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1829 goto next_rx_no_prod_no_len;
1830 }
1831
c0c050c5
MC
1832 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1833 cp_cons = RING_CMP(tmp_raw_cons);
1834 rxcmp1 = (struct rx_cmp_ext *)
1835 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1836
1837 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1838 return -EBUSY;
1839
828affc2
MC
 1840 /* The validity test of the entry must be done before
 1841 * reading any further.
 1842 */
1843 dma_rmb();
c0c050c5
MC
1844 prod = rxr->rx_prod;
1845
1846 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1847 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1848 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1849
4e5dbbda 1850 *event |= BNXT_RX_EVENT;
e7e70fa6 1851 goto next_rx_no_prod_no_len;
c0c050c5
MC
1852
1853 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
e44758b7 1854 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
c0c050c5 1855 (struct rx_tpa_end_cmp *)rxcmp,
4e5dbbda 1856 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
c0c050c5 1857
1fac4b2f 1858 if (IS_ERR(skb))
c0c050c5
MC
1859 return -EBUSY;
1860
1861 rc = -ENOMEM;
1862 if (likely(skb)) {
ee5c7fb3 1863 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
1864 rc = 1;
1865 }
4e5dbbda 1866 *event |= BNXT_RX_EVENT;
e7e70fa6 1867 goto next_rx_no_prod_no_len;
c0c050c5
MC
1868 }
1869
1870 cons = rxcmp->rx_cmp_opaque;
fa7e2812 1871 if (unlikely(cons != rxr->rx_next_cons)) {
bbd6f0a9 1872 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
fa7e2812 1873
1b5c8b63
MC
 1874 /* 0xffff is a forced error, don't print it */
1875 if (rxr->rx_next_cons != 0xffff)
1876 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1877 cons, rxr->rx_next_cons);
fa7e2812 1878 bnxt_sched_reset(bp, rxr);
bbd6f0a9
MC
1879 if (rc1)
1880 return rc1;
1881 goto next_rx_no_prod_no_len;
fa7e2812 1882 }
a1b0e4e6
MC
1883 rx_buf = &rxr->rx_buf_ring[cons];
1884 data = rx_buf->data;
1885 data_ptr = rx_buf->data_ptr;
6bb19474 1886 prefetch(data_ptr);
c0c050c5 1887
c61fb99c
MC
1888 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1889 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
c0c050c5
MC
1890
1891 if (agg_bufs) {
1892 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1893 return -EBUSY;
1894
1895 cp_cons = NEXT_CMP(cp_cons);
4e5dbbda 1896 *event |= BNXT_AGG_EVENT;
c0c050c5 1897 }
4e5dbbda 1898 *event |= BNXT_RX_EVENT;
c0c050c5
MC
1899
1900 rx_buf->data = NULL;
1901 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
8e44e96c
MC
1902 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1903
c0c050c5
MC
1904 bnxt_reuse_rx_data(rxr, cons, data);
1905 if (agg_bufs)
4a228a3a
MC
1906 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1907 false);
c0c050c5
MC
1908
1909 rc = -EIO;
8e44e96c 1910 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
9d8b5f05 1911 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
8d4bd96b
MC
1912 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1913 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
8fbf58e1
MC
1914 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1915 rx_err);
19b3751f
MC
1916 bnxt_sched_reset(bp, rxr);
1917 }
8e44e96c 1918 }
0b397b17 1919 goto next_rx_no_len;
c0c050c5
MC
1920 }
1921
7f5515d1
PC
1922 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1923 len = flags >> RX_CMP_LEN_SHIFT;
11cd119d 1924 dma_addr = rx_buf->mapping;
c0c050c5 1925
b231c3f3 1926 if (bnxt_xdp_attached(bp, rxr)) {
bbfc17e5 1927 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
4c6c123c
AG
1928 if (agg_bufs) {
1929 u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
1930 cp_cons, agg_bufs,
1931 false);
1932 if (!frag_len) {
1933 cpr->sw_stats.rx.rx_oom_discards += 1;
1934 rc = -ENOMEM;
1935 goto next_rx;
1936 }
1937 }
ee536dcb
AG
1938 xdp_active = true;
1939 }
1940
9f4b2830 1941 if (xdp_active) {
9b3e6078 1942 if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
b231c3f3
AG
1943 rc = 1;
1944 goto next_rx;
1945 }
c6d30e83 1946 }
ee536dcb 1947
c0c050c5 1948 if (len <= bp->rx_copy_thresh) {
6bb19474 1949 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
c0c050c5
MC
1950 bnxt_reuse_rx_data(rxr, cons, data);
1951 if (!skb) {
a7559bc8
AG
1952 if (agg_bufs) {
1953 if (!xdp_active)
1954 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1955 agg_bufs, false);
1956 else
1957 bnxt_xdp_buff_frags_free(rxr, &xdp);
1958 }
907fd4a2 1959 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1960 rc = -ENOMEM;
1961 goto next_rx;
1962 }
1963 } else {
c61fb99c
MC
1964 u32 payload;
1965
c6d30e83
MC
1966 if (rx_buf->data_ptr == data_ptr)
1967 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1968 else
1969 payload = 0;
6bb19474 1970 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
c61fb99c 1971 payload | len);
c0c050c5 1972 if (!skb) {
907fd4a2 1973 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1974 rc = -ENOMEM;
1975 goto next_rx;
1976 }
1977 }
1978
1979 if (agg_bufs) {
32861236
AG
1980 if (!xdp_active) {
1981 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
1982 if (!skb) {
1983 cpr->sw_stats.rx.rx_oom_discards += 1;
1984 rc = -ENOMEM;
1985 goto next_rx;
1986 }
1dc4c557
AG
1987 } else {
1988 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
1989 if (!skb) {
1990 /* we should be able to free the old skb here */
a7559bc8 1991 bnxt_xdp_buff_frags_free(rxr, &xdp);
1dc4c557
AG
1992 cpr->sw_stats.rx.rx_oom_discards += 1;
1993 rc = -ENOMEM;
1994 goto next_rx;
1995 }
c0c050c5
MC
1996 }
1997 }
1998
1999 if (RX_CMP_HASH_VALID(rxcmp)) {
2000 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
2001 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
2002
2003 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
2004 if (hash_type != 1 && hash_type != 3)
2005 type = PKT_HASH_TYPE_L3;
2006 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2007 }
2008
ee5c7fb3
SP
2009 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
2010 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
c0c050c5 2011
8852ddb4
MC
2012 if ((rxcmp1->rx_cmp_flags2 &
2013 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
a196e96b 2014 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
c0c050c5 2015 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
ed7bc602 2016 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
96bdd4b9
MC
2017 __be16 vlan_proto = htons(meta_data >>
2018 RX_CMP_FLAGS2_METADATA_TPID_SFT);
c0c050c5 2019
96bdd4b9
MC
2020 if (eth_type_vlan(vlan_proto)) {
2021 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2022 } else {
2023 dev_kfree_skb(skb);
2024 goto next_rx;
2025 }
c0c050c5
MC
2026 }
2027
2028 skb_checksum_none_assert(skb);
2029 if (RX_CMP_L4_CS_OK(rxcmp1)) {
2030 if (dev->features & NETIF_F_RXCSUM) {
2031 skb->ip_summed = CHECKSUM_UNNECESSARY;
2032 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2033 }
2034 } else {
665e350d
SB
2035 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2036 if (dev->features & NETIF_F_RXCSUM)
9d8b5f05 2037 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
665e350d 2038 }
c0c050c5
MC
2039 }
2040
7f5515d1 2041 if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
66ed81dc 2042 RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
7f5515d1
PC
2043 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2044 u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
2045 u64 ns, ts;
2046
2047 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2048 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2049
2050 spin_lock_bh(&ptp->ptp_lock);
2051 ns = timecounter_cyc2time(&ptp->tc, ts);
2052 spin_unlock_bh(&ptp->ptp_lock);
2053 memset(skb_hwtstamps(skb), 0,
2054 sizeof(*skb_hwtstamps(skb)));
2055 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2056 }
2057 }
2058 }
ee5c7fb3 2059 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
2060 rc = 1;
2061
2062next_rx:
6a8788f2
AG
2063 cpr->rx_packets += 1;
2064 cpr->rx_bytes += len;
e7e70fa6 2065
0b397b17
MC
2066next_rx_no_len:
2067 rxr->rx_prod = NEXT_RX(prod);
2068 rxr->rx_next_cons = NEXT_RX(cons);
2069
e7e70fa6 2070next_rx_no_prod_no_len:
c0c050c5
MC
2071 *raw_cons = tmp_raw_cons;
2072
2073 return rc;
2074}
2075
2270bc5d
MC
2076/* In netpoll mode, if we are using a combined completion ring, we need to
2077 * discard the rx packets and recycle the buffers.
2078 */
e44758b7
MC
2079static int bnxt_force_rx_discard(struct bnxt *bp,
2080 struct bnxt_cp_ring_info *cpr,
2270bc5d
MC
2081 u32 *raw_cons, u8 *event)
2082{
2270bc5d
MC
2083 u32 tmp_raw_cons = *raw_cons;
2084 struct rx_cmp_ext *rxcmp1;
2085 struct rx_cmp *rxcmp;
2086 u16 cp_cons;
2087 u8 cmp_type;
40bedf7c 2088 int rc;
2270bc5d
MC
2089
2090 cp_cons = RING_CMP(tmp_raw_cons);
2091 rxcmp = (struct rx_cmp *)
2092 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2093
2094 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2095 cp_cons = RING_CMP(tmp_raw_cons);
2096 rxcmp1 = (struct rx_cmp_ext *)
2097 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2098
2099 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2100 return -EBUSY;
2101
828affc2
MC
 2102 /* The validity test of the entry must be done before
 2103 * reading any further.
 2104 */
2105 dma_rmb();
2270bc5d
MC
2106 cmp_type = RX_CMP_TYPE(rxcmp);
2107 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2108 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2109 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2110 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2111 struct rx_tpa_end_cmp_ext *tpa_end1;
2112
2113 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2114 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2115 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2116 }
40bedf7c
JK
2117 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2118 if (rc && rc != -EBUSY)
2119 cpr->sw_stats.rx.rx_netpoll_discards += 1;
2120 return rc;
2270bc5d
MC
2121}
2122
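/* Read one firmware health/status register.  The register descriptor
 * encodes both the access type and the offset: PCI config space, a GRC
 * address read through its mapped BAR0 window, or a direct BAR0/BAR1
 * offset.
 */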
7e914027
MC
2123u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2124{
2125 struct bnxt_fw_health *fw_health = bp->fw_health;
2126 u32 reg = fw_health->regs[reg_idx];
2127 u32 reg_type, reg_off, val = 0;
2128
2129 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2130 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2131 switch (reg_type) {
2132 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2133 pci_read_config_dword(bp->pdev, reg_off, &val);
2134 break;
2135 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2136 reg_off = fw_health->mapped_regs[reg_idx];
df561f66 2137 fallthrough;
7e914027
MC
2138 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2139 val = readl(bp->bar0 + reg_off);
2140 break;
2141 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2142 val = readl(bp->bar1 + reg_off);
2143 break;
2144 }
2145 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2146 val &= fw_health->fw_reset_inprog_reg_mask;
2147 return val;
2148}
2149
8d4bd96b
MC
2150static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2151{
2152 int i;
2153
2154 for (i = 0; i < bp->rx_nr_rings; i++) {
2155 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2156 struct bnxt_ring_grp_info *grp_info;
2157
2158 grp_info = &bp->grp_info[grp_idx];
2159 if (grp_info->agg_fw_ring_id == ring_id)
2160 return grp_idx;
2161 }
2162 return INVALID_HW_RING_ID;
2163}
2164
abf90ac2
PC
2165static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2166{
0fb8582a
MC
2167 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2168
2169 switch (err_type) {
abf90ac2
PC
2170 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2171 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2172 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2173 break;
5a717f4a
SK
2174 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2175 netdev_warn(bp->dev, "Pause Storm detected!\n");
2176 break;
0fb8582a
MC
2177 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2178 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2179 break;
abf90ac2 2180 default:
0fb8582a
MC
2181 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2182 err_type);
abf90ac2
PC
2183 break;
2184 }
2185}
2186
4bb13abf 2187#define BNXT_GET_EVENT_PORT(data) \
87c374de
MC
2188 ((data) & \
2189 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
4bb13abf 2190
8d4bd96b
MC
2191#define BNXT_EVENT_RING_TYPE(data2) \
2192 ((data2) & \
2193 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2194
2195#define BNXT_EVENT_RING_TYPE_RX(data2) \
2196 (BNXT_EVENT_RING_TYPE(data2) == \
2197 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2198
8bcf6f04
PC
2199#define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
2200 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2201 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2202
2203#define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
2204 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2205 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2206
2207#define BNXT_PHC_BITS 48
2208
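/* Process a firmware asynchronous event completion: link and PHY changes,
 * firmware reset and error recovery notifications, ring monitor, PTP/PHC,
 * echo requests, error reports, etc.  Events that need process context set
 * a bit in bp->sp_event and queue the slow-path workqueue; all events are
 * also forwarded to the ULPs via bnxt_ulp_async_events().
 */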
c0c050c5
MC
2209static int bnxt_async_event_process(struct bnxt *bp,
2210 struct hwrm_async_event_cmpl *cmpl)
2211{
2212 u16 event_id = le16_to_cpu(cmpl->event_id);
03ab8ca1
MC
2213 u32 data1 = le32_to_cpu(cmpl->event_data1);
2214 u32 data2 = le32_to_cpu(cmpl->event_data2);
c0c050c5 2215
8fa4219d
EP
2216 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2217 event_id, data1, data2);
2218
c0c050c5
MC
2219 /* TODO CHIMP_FW: Define event id's for link change, error etc */
2220 switch (event_id) {
87c374de 2221 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
8cbde117
MC
2222 struct bnxt_link_info *link_info = &bp->link_info;
2223
2224 if (BNXT_VF(bp))
2225 goto async_event_process_exit;
a8168b6c
MC
2226
2227 /* print unsupported speed warning in forced speed mode only */
2228 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2229 (data1 & 0x20000)) {
8cbde117
MC
2230 u16 fw_speed = link_info->force_link_speed;
2231 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2232
a8168b6c
MC
2233 if (speed != SPEED_UNKNOWN)
2234 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2235 speed);
8cbde117 2236 }
286ef9d6 2237 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
8cbde117 2238 }
df561f66 2239 fallthrough;
b1613e78
MC
2240 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2241 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2242 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
df561f66 2243 fallthrough;
87c374de 2244 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
c0c050c5 2245 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
19241368 2246 break;
87c374de 2247 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
19241368 2248 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
c0c050c5 2249 break;
87c374de 2250 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
4bb13abf
MC
2251 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2252
2253 if (BNXT_VF(bp))
2254 break;
2255
2256 if (bp->pf.port_id != port_id)
2257 break;
2258
4bb13abf
MC
2259 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2260 break;
2261 }
87c374de 2262 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
fc0f1929
MC
2263 if (BNXT_PF(bp))
2264 goto async_event_process_exit;
2265 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2266 break;
5863b10a 2267 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
aadb0b1a 2268 char *type_str = "Solicited";
5863b10a 2269
8280b38e
VV
2270 if (!bp->fw_health)
2271 goto async_event_process_exit;
2272
2151fe08
MC
2273 bp->fw_reset_timestamp = jiffies;
2274 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2275 if (!bp->fw_reset_min_dsecs)
2276 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2277 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2278 if (!bp->fw_reset_max_dsecs)
2279 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
8f6c5e4d
EP
2280 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2281 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2282 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
aadb0b1a 2283 type_str = "Fatal";
8cc95ceb 2284 bp->fw_health->fatalities++;
acfb50e4 2285 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
aadb0b1a
EP
2286 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2287 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2288 type_str = "Non-fatal";
8cc95ceb 2289 bp->fw_health->survivals++;
aadb0b1a 2290 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
5863b10a 2291 }
871127e6 2292 netif_warn(bp, hw, bp->dev,
aadb0b1a
EP
2293 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2294 type_str, data1, data2,
871127e6
MC
2295 bp->fw_reset_min_dsecs * 100,
2296 bp->fw_reset_max_dsecs * 100);
2151fe08
MC
2297 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2298 break;
5863b10a 2299 }
7e914027
MC
2300 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2301 struct bnxt_fw_health *fw_health = bp->fw_health;
1596847d
EP
2302 char *status_desc = "healthy";
2303 u32 status;
7e914027
MC
2304
2305 if (!fw_health)
2306 goto async_event_process_exit;
2307
1b2b9183
MC
2308 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2309 fw_health->enabled = false;
1596847d 2310 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
7e914027 2311 break;
f4d95c3c 2312 }
1596847d 2313 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
7e914027
MC
2314 fw_health->tmr_multiplier =
2315 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2316 bp->current_interval * 10);
2317 fw_health->tmr_counter = fw_health->tmr_multiplier;
eca4cf12 2318 if (!fw_health->enabled)
1b2b9183
MC
2319 fw_health->last_fw_heartbeat =
2320 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
eca4cf12
MC
2321 fw_health->last_fw_reset_cnt =
2322 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
1596847d
EP
2323 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2324 if (status != BNXT_FW_STATUS_HEALTHY)
2325 status_desc = "unhealthy";
f4d95c3c 2326 netif_info(bp, drv, bp->dev,
1596847d
EP
2327 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2328 fw_health->primary ? "primary" : "backup", status,
2329 status_desc, fw_health->last_fw_reset_cnt);
1b2b9183
MC
2330 if (!fw_health->enabled) {
2331 /* Make sure tmr_counter is set and visible to
2332 * bnxt_health_check() before setting enabled to true.
2333 */
2334 smp_wmb();
2335 fw_health->enabled = true;
2336 }
7e914027
MC
2337 goto async_event_process_exit;
2338 }
a44daa8f 2339 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
871127e6
MC
2340 netif_notice(bp, hw, bp->dev,
2341 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2342 data1, data2);
a44daa8f 2343 goto async_event_process_exit;
8d4bd96b 2344 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
8d4bd96b
MC
2345 struct bnxt_rx_ring_info *rxr;
2346 u16 grp_idx;
2347
2348 if (bp->flags & BNXT_FLAG_CHIP_P5)
2349 goto async_event_process_exit;
2350
2351 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2352 BNXT_EVENT_RING_TYPE(data2), data1);
2353 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2354 goto async_event_process_exit;
2355
2356 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2357 if (grp_idx == INVALID_HW_RING_ID) {
2358 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2359 data1);
2360 goto async_event_process_exit;
2361 }
2362 rxr = bp->bnapi[grp_idx]->rx_ring;
2363 bnxt_sched_reset(bp, rxr);
2364 goto async_event_process_exit;
2365 }
df97b34d
MC
2366 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2367 struct bnxt_fw_health *fw_health = bp->fw_health;
2368
2369 netif_notice(bp, hw, bp->dev,
2370 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2371 data1, data2);
2372 if (fw_health) {
2373 fw_health->echo_req_data1 = data1;
2374 fw_health->echo_req_data2 = data2;
2375 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2376 break;
2377 }
2378 goto async_event_process_exit;
2379 }
099fdeda
PC
2380 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2381 bnxt_ptp_pps_event(bp, data1, data2);
abf90ac2
PC
2382 goto async_event_process_exit;
2383 }
2384 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2385 bnxt_event_error_report(bp, data1, data2);
099fdeda
PC
2386 goto async_event_process_exit;
2387 }
8bcf6f04
PC
2388 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2389 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2390 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2391 if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
2392 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2393 u64 ns;
2394
2395 spin_lock_bh(&ptp->ptp_lock);
2396 bnxt_ptp_update_current_time(bp);
2397 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2398 BNXT_PHC_BITS) | ptp->current_time);
2399 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2400 spin_unlock_bh(&ptp->ptp_lock);
2401 }
2402 break;
2403 }
2404 goto async_event_process_exit;
2405 }
68f684e2
EP
2406 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2407 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2408
2409 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2410 goto async_event_process_exit;
2411 }
c0c050c5 2412 default:
19241368 2413 goto async_event_process_exit;
c0c050c5 2414 }
c213eae8 2415 bnxt_queue_sp_work(bp);
19241368 2416async_event_process_exit:
a588e458 2417 bnxt_ulp_async_events(bp, cmpl);
c0c050c5
MC
2418 return 0;
2419}
2420
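/* Dispatch an HWRM-related completion: DONE completions update the pending
 * HWRM request token, forwarded VF requests are flagged for the PF slow
 * path, and async event completions go to bnxt_async_event_process().
 */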
2421static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2422{
2423 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2424 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2425 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2426 (struct hwrm_fwd_req_cmpl *)txcmp;
2427
2428 switch (cmpl_type) {
2429 case CMPL_BASE_TYPE_HWRM_DONE:
2430 seq_id = le16_to_cpu(h_cmpl->sequence_id);
68f684e2 2431 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
c0c050c5
MC
2432 break;
2433
2434 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2435 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2436
2437 if ((vf_id < bp->pf.first_vf_id) ||
2438 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2439 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2440 vf_id);
2441 return -EINVAL;
2442 }
2443
2444 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2445 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
c213eae8 2446 bnxt_queue_sp_work(bp);
c0c050c5
MC
2447 break;
2448
2449 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2450 bnxt_async_event_process(bp,
2451 (struct hwrm_async_event_cmpl *)txcmp);
cc9fd180 2452 break;
c0c050c5
MC
2453
2454 default:
2455 break;
2456 }
2457
2458 return 0;
2459}
2460
2461static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2462{
2463 struct bnxt_napi *bnapi = dev_instance;
2464 struct bnxt *bp = bnapi->bp;
2465 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2466 u32 cons = RING_CMP(cpr->cp_raw_cons);
2467
6a8788f2 2468 cpr->event_ctr++;
c0c050c5
MC
2469 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2470 napi_schedule(&bnapi->napi);
2471 return IRQ_HANDLED;
2472}
2473
2474static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2475{
2476 u32 raw_cons = cpr->cp_raw_cons;
2477 u16 cons = RING_CMP(raw_cons);
2478 struct tx_cmp *txcmp;
2479
2480 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2481
2482 return TX_CMP_VALID(txcmp, raw_cons);
2483}
2484
c0c050c5
MC
2485static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2486{
2487 struct bnxt_napi *bnapi = dev_instance;
2488 struct bnxt *bp = bnapi->bp;
2489 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2490 u32 cons = RING_CMP(cpr->cp_raw_cons);
2491 u32 int_status;
2492
2493 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2494
2495 if (!bnxt_has_work(bp, cpr)) {
11809490 2496 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
c0c050c5
MC
2497 /* return if erroneous interrupt */
2498 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2499 return IRQ_NONE;
2500 }
2501
2502 /* disable ring IRQ */
697197e5 2503 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
c0c050c5
MC
2504
2505 /* Return here if interrupt is shared and is disabled. */
2506 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2507 return IRQ_HANDLED;
2508
2509 napi_schedule(&bnapi->napi);
2510 return IRQ_HANDLED;
2511}
2512
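/* Core completion-ring poll loop: consume TX completions (returning the
 * full budget once tx_wake_thresh packets are seen so NAPI will repoll),
 * hand RX completions to bnxt_rx_pkt() (or bnxt_force_rx_discard() when
 * called with a zero budget, i.e. from netpoll), and dispatch HWRM
 * completions.  TX work and doorbell events are accumulated on the bnapi
 * and flushed by __bnxt_poll_work_done().
 */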
3675b92f
MC
2513static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2514 int budget)
c0c050c5 2515{
e44758b7 2516 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5
MC
2517 u32 raw_cons = cpr->cp_raw_cons;
2518 u32 cons;
2519 int tx_pkts = 0;
2520 int rx_pkts = 0;
4e5dbbda 2521 u8 event = 0;
c0c050c5
MC
2522 struct tx_cmp *txcmp;
2523
0fcec985 2524 cpr->has_more_work = 0;
340ac85e 2525 cpr->had_work_done = 1;
c0c050c5
MC
2526 while (1) {
2527 int rc;
2528
2529 cons = RING_CMP(raw_cons);
2530 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2531
2532 if (!TX_CMP_VALID(txcmp, raw_cons))
2533 break;
2534
67a95e20
MC
 2535 /* The validity test of the entry must be done before
 2536 * reading any further.
 2537 */
b67daab0 2538 dma_rmb();
c0c050c5
MC
2539 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2540 tx_pkts++;
2541 /* return full budget so NAPI will complete. */
5bed8b07 2542 if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
c0c050c5 2543 rx_pkts = budget;
73f21c65 2544 raw_cons = NEXT_RAW_CMP(raw_cons);
0fcec985
MC
2545 if (budget)
2546 cpr->has_more_work = 1;
73f21c65
MC
2547 break;
2548 }
c0c050c5 2549 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2270bc5d 2550 if (likely(budget))
e44758b7 2551 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2270bc5d 2552 else
e44758b7 2553 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2270bc5d 2554 &event);
c0c050c5
MC
2555 if (likely(rc >= 0))
2556 rx_pkts += rc;
903649e7
MC
2557 /* Increment rx_pkts when rc is -ENOMEM to count towards
2558 * the NAPI budget. Otherwise, we may potentially loop
2559 * here forever if we consistently cannot allocate
2560 * buffers.
2561 */
2edbdb31 2562 else if (rc == -ENOMEM && budget)
903649e7 2563 rx_pkts++;
c0c050c5
MC
2564 else if (rc == -EBUSY) /* partial completion */
2565 break;
c0c050c5
MC
2566 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2567 CMPL_BASE_TYPE_HWRM_DONE) ||
2568 (TX_CMP_TYPE(txcmp) ==
2569 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2570 (TX_CMP_TYPE(txcmp) ==
2571 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2572 bnxt_hwrm_handler(bp, txcmp);
2573 }
2574 raw_cons = NEXT_RAW_CMP(raw_cons);
2575
0fcec985
MC
2576 if (rx_pkts && rx_pkts == budget) {
2577 cpr->has_more_work = 1;
c0c050c5 2578 break;
0fcec985 2579 }
c0c050c5
MC
2580 }
2581
f18c2b77 2582 if (event & BNXT_REDIRECT_EVENT)
b976969b 2583 xdp_do_flush();
f18c2b77 2584
38413406
MC
2585 if (event & BNXT_TX_EVENT) {
2586 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
38413406
MC
2587 u16 prod = txr->tx_prod;
2588
2589 /* Sync BD data before updating doorbell */
2590 wmb();
2591
697197e5 2592 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
38413406
MC
2593 }
2594
c0c050c5 2595 cpr->cp_raw_cons = raw_cons;
3675b92f
MC
2596 bnapi->tx_pkts += tx_pkts;
2597 bnapi->events |= event;
2598 return rx_pkts;
2599}
c0c050c5 2600
3675b92f
MC
2601static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2602{
2603 if (bnapi->tx_pkts) {
2604 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2605 bnapi->tx_pkts = 0;
2606 }
c0c050c5 2607
8fbf58e1 2608 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
b6ab4b01 2609 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5 2610
e8f267b0 2611 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
c0c050c5 2612 }
a7559bc8
AG
2613 if (bnapi->events & BNXT_AGG_EVENT) {
2614 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2615
2616 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2617 }
3675b92f
MC
2618 bnapi->events = 0;
2619}
2620
2621static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2622 int budget)
2623{
2624 struct bnxt_napi *bnapi = cpr->bnapi;
2625 int rx_pkts;
2626
2627 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2628
2629 /* ACK completion ring before freeing tx ring and producing new
2630 * buffers in rx/agg rings to prevent overflowing the completion
2631 * ring.
2632 */
2633 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2634
2635 __bnxt_poll_work_done(bp, bnapi);
c0c050c5
MC
2636 return rx_pkts;
2637}
2638
10bbdaf5
PS
2639static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2640{
2641 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2642 struct bnxt *bp = bnapi->bp;
2643 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2644 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2645 struct tx_cmp *txcmp;
2646 struct rx_cmp_ext *rxcmp1;
2647 u32 cp_cons, tmp_raw_cons;
2648 u32 raw_cons = cpr->cp_raw_cons;
2649 u32 rx_pkts = 0;
4e5dbbda 2650 u8 event = 0;
10bbdaf5
PS
2651
2652 while (1) {
2653 int rc;
2654
2655 cp_cons = RING_CMP(raw_cons);
2656 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2657
2658 if (!TX_CMP_VALID(txcmp, raw_cons))
2659 break;
2660
828affc2
MC
 2661 /* The validity test of the entry must be done before
 2662 * reading any further.
 2663 */
2664 dma_rmb();
10bbdaf5
PS
2665 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2666 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2667 cp_cons = RING_CMP(tmp_raw_cons);
2668 rxcmp1 = (struct rx_cmp_ext *)
2669 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2670
2671 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2672 break;
2673
2674 /* force an error to recycle the buffer */
2675 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2676 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2677
e44758b7 2678 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2edbdb31 2679 if (likely(rc == -EIO) && budget)
10bbdaf5
PS
2680 rx_pkts++;
2681 else if (rc == -EBUSY) /* partial completion */
2682 break;
2683 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2684 CMPL_BASE_TYPE_HWRM_DONE)) {
2685 bnxt_hwrm_handler(bp, txcmp);
2686 } else {
2687 netdev_err(bp->dev,
2688 "Invalid completion received on special ring\n");
2689 }
2690 raw_cons = NEXT_RAW_CMP(raw_cons);
2691
2692 if (rx_pkts == budget)
2693 break;
2694 }
2695
2696 cpr->cp_raw_cons = raw_cons;
697197e5
MC
2697 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2698 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
10bbdaf5 2699
434c975a 2700 if (event & BNXT_AGG_EVENT)
697197e5 2701 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
10bbdaf5
PS
2702
2703 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
6ad20165 2704 napi_complete_done(napi, rx_pkts);
697197e5 2705 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
10bbdaf5
PS
2706 }
2707 return rx_pkts;
2708}
2709
c0c050c5
MC
2710static int bnxt_poll(struct napi_struct *napi, int budget)
2711{
2712 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2713 struct bnxt *bp = bnapi->bp;
2714 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2715 int work_done = 0;
2716
0da65f49
MC
2717 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2718 napi_complete(napi);
2719 return 0;
2720 }
c0c050c5 2721 while (1) {
e44758b7 2722 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
c0c050c5 2723
73f21c65
MC
2724 if (work_done >= budget) {
2725 if (!budget)
697197e5 2726 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5 2727 break;
73f21c65 2728 }
c0c050c5
MC
2729
2730 if (!bnxt_has_work(bp, cpr)) {
e7b95691 2731 if (napi_complete_done(napi, work_done))
697197e5 2732 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5
MC
2733 break;
2734 }
2735 }
6a8788f2 2736 if (bp->flags & BNXT_FLAG_DIM) {
f06d0ca4 2737 struct dim_sample dim_sample = {};
6a8788f2 2738
8960b389
TG
2739 dim_update_sample(cpr->event_ctr,
2740 cpr->rx_packets,
2741 cpr->rx_bytes,
2742 &dim_sample);
6a8788f2
AG
2743 net_dim(&cpr->dim, dim_sample);
2744 }
c0c050c5
MC
2745 return work_done;
2746}
2747
0fcec985
MC
2748static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2749{
2750 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2751 int i, work_done = 0;
2752
2753 for (i = 0; i < 2; i++) {
2754 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2755
2756 if (cpr2) {
2757 work_done += __bnxt_poll_work(bp, cpr2,
2758 budget - work_done);
2759 cpr->has_more_work |= cpr2->has_more_work;
2760 }
2761 }
2762 return work_done;
2763}
2764
2765static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
340ac85e 2766 u64 dbr_type)
0fcec985
MC
2767{
2768 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2769 int i;
2770
2771 for (i = 0; i < 2; i++) {
2772 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2773 struct bnxt_db_info *db;
2774
340ac85e 2775 if (cpr2 && cpr2->had_work_done) {
0fcec985 2776 db = &cpr2->cp_db;
c6132f6f
MC
2777 bnxt_writeq(bp, db->db_key64 | dbr_type |
2778 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
0fcec985
MC
2779 cpr2->had_work_done = 0;
2780 }
2781 }
2782 __bnxt_poll_work_done(bp, bnapi);
2783}
2784
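/* NAPI poll for P5 chips: iterate the notification queue (NQ).  Each CQ
 * notification entry points at one of the TX/RX completion rings in
 * cp_ring_arr, which is then polled by __bnxt_poll_work(); other entries
 * are HWRM completions.  Completion-ring doorbells are written by
 * __bnxt_poll_cqs_done(), with ARMALL once the NQ has been fully drained.
 */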
2785static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2786{
2787 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2788 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
dc1f5d1e 2789 struct bnxt_cp_ring_info *cpr_rx;
0fcec985
MC
2790 u32 raw_cons = cpr->cp_raw_cons;
2791 struct bnxt *bp = bnapi->bp;
2792 struct nqe_cn *nqcmp;
2793 int work_done = 0;
2794 u32 cons;
2795
0da65f49
MC
2796 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2797 napi_complete(napi);
2798 return 0;
2799 }
0fcec985
MC
2800 if (cpr->has_more_work) {
2801 cpr->has_more_work = 0;
2802 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
0fcec985
MC
2803 }
2804 while (1) {
2805 cons = RING_CMP(raw_cons);
2806 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2807
2808 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
54a9062f
MC
2809 if (cpr->has_more_work)
2810 break;
2811
340ac85e 2812 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
0fcec985
MC
2813 cpr->cp_raw_cons = raw_cons;
2814 if (napi_complete_done(napi, work_done))
2815 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2816 cpr->cp_raw_cons);
dc1f5d1e 2817 goto poll_done;
0fcec985
MC
2818 }
2819
 2820 /* The validity test of the entry must be done before
 2821 * reading any further.
 2822 */
2823 dma_rmb();
2824
2825 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2826 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2827 struct bnxt_cp_ring_info *cpr2;
2828
195af579
MC
2829 /* No more budget for RX work */
2830 if (budget && work_done >= budget && idx == BNXT_RX_HDL)
2831 break;
2832
0fcec985
MC
2833 cpr2 = cpr->cp_ring_arr[idx];
2834 work_done += __bnxt_poll_work(bp, cpr2,
2835 budget - work_done);
54a9062f 2836 cpr->has_more_work |= cpr2->has_more_work;
0fcec985
MC
2837 } else {
2838 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2839 }
2840 raw_cons = NEXT_RAW_CMP(raw_cons);
0fcec985 2841 }
340ac85e 2842 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
389a877a
MC
2843 if (raw_cons != cpr->cp_raw_cons) {
2844 cpr->cp_raw_cons = raw_cons;
2845 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2846 }
dc1f5d1e
AG
2847poll_done:
2848 cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL];
2849 if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) {
2850 struct dim_sample dim_sample = {};
2851
2852 dim_update_sample(cpr->event_ctr,
2853 cpr_rx->rx_packets,
2854 cpr_rx->rx_bytes,
2855 &dim_sample);
2856 net_dim(&cpr->dim, dim_sample);
2857 }
0fcec985
MC
2858 return work_done;
2859}
2860
c0c050c5
MC
2861static void bnxt_free_tx_skbs(struct bnxt *bp)
2862{
2863 int i, max_idx;
2864 struct pci_dev *pdev = bp->pdev;
2865
b6ab4b01 2866 if (!bp->tx_ring)
c0c050c5
MC
2867 return;
2868
2869 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2870 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2871 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2872 int j;
2873
1affc01f
EP
2874 if (!txr->tx_buf_ring)
2875 continue;
2876
c0c050c5
MC
2877 for (j = 0; j < max_idx;) {
2878 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
f18c2b77 2879 struct sk_buff *skb;
c0c050c5
MC
2880 int k, last;
2881
f18c2b77
AG
2882 if (i < bp->tx_nr_rings_xdp &&
2883 tx_buf->action == XDP_REDIRECT) {
2884 dma_unmap_single(&pdev->dev,
2885 dma_unmap_addr(tx_buf, mapping),
2886 dma_unmap_len(tx_buf, len),
df70303d 2887 DMA_TO_DEVICE);
f18c2b77
AG
2888 xdp_return_frame(tx_buf->xdpf);
2889 tx_buf->action = 0;
2890 tx_buf->xdpf = NULL;
2891 j++;
2892 continue;
2893 }
2894
2895 skb = tx_buf->skb;
c0c050c5
MC
2896 if (!skb) {
2897 j++;
2898 continue;
2899 }
2900
2901 tx_buf->skb = NULL;
2902
2903 if (tx_buf->is_push) {
2904 dev_kfree_skb(skb);
2905 j += 2;
2906 continue;
2907 }
2908
2909 dma_unmap_single(&pdev->dev,
2910 dma_unmap_addr(tx_buf, mapping),
2911 skb_headlen(skb),
df70303d 2912 DMA_TO_DEVICE);
c0c050c5
MC
2913
2914 last = tx_buf->nr_frags;
2915 j += 2;
d612a579
MC
2916 for (k = 0; k < last; k++, j++) {
2917 int ring_idx = j & bp->tx_ring_mask;
c0c050c5
MC
2918 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2919
d612a579 2920 tx_buf = &txr->tx_buf_ring[ring_idx];
c0c050c5
MC
2921 dma_unmap_page(
2922 &pdev->dev,
2923 dma_unmap_addr(tx_buf, mapping),
df70303d 2924 skb_frag_size(frag), DMA_TO_DEVICE);
c0c050c5
MC
2925 }
2926 dev_kfree_skb(skb);
2927 }
2928 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2929 }
2930}
2931
975bc99a 2932static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
c0c050c5 2933{
975bc99a 2934 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
c0c050c5 2935 struct pci_dev *pdev = bp->pdev;
975bc99a
MC
2936 struct bnxt_tpa_idx_map *map;
2937 int i, max_idx, max_agg_idx;
c0c050c5
MC
2938
2939 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2940 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
975bc99a
MC
2941 if (!rxr->rx_tpa)
2942 goto skip_rx_tpa_free;
c0c050c5 2943
975bc99a
MC
2944 for (i = 0; i < bp->max_tpa; i++) {
2945 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2946 u8 *data = tpa_info->data;
c0c050c5 2947
975bc99a
MC
2948 if (!data)
2949 continue;
c0c050c5 2950
975bc99a
MC
2951 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2952 bp->rx_buf_use_size, bp->rx_dir,
2953 DMA_ATTR_WEAK_ORDERING);
c0c050c5 2954
975bc99a 2955 tpa_info->data = NULL;
c0c050c5 2956
720908e5 2957 skb_free_frag(data);
975bc99a 2958 }
c0c050c5 2959
975bc99a 2960skip_rx_tpa_free:
1affc01f
EP
2961 if (!rxr->rx_buf_ring)
2962 goto skip_rx_buf_free;
2963
975bc99a
MC
2964 for (i = 0; i < max_idx; i++) {
2965 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2966 dma_addr_t mapping = rx_buf->mapping;
2967 void *data = rx_buf->data;
c0c050c5 2968
975bc99a
MC
2969 if (!data)
2970 continue;
c0c050c5 2971
975bc99a
MC
2972 rx_buf->data = NULL;
2973 if (BNXT_RX_PAGE_MODE(bp)) {
2974 mapping -= bp->rx_dma_offset;
2975 dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2976 bp->rx_dir,
2977 DMA_ATTR_WEAK_ORDERING);
2978 page_pool_recycle_direct(rxr->page_pool, data);
2979 } else {
2980 dma_unmap_single_attrs(&pdev->dev, mapping,
2981 bp->rx_buf_use_size, bp->rx_dir,
2982 DMA_ATTR_WEAK_ORDERING);
720908e5 2983 skb_free_frag(data);
c0c050c5 2984 }
975bc99a 2985 }
1affc01f
EP
2986
2987skip_rx_buf_free:
2988 if (!rxr->rx_agg_ring)
2989 goto skip_rx_agg_free;
2990
975bc99a
MC
2991 for (i = 0; i < max_agg_idx; i++) {
2992 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2993 struct page *page = rx_agg_buf->page;
c0c050c5 2994
975bc99a
MC
2995 if (!page)
2996 continue;
c0c050c5 2997
9a6aa350
AG
2998 if (BNXT_RX_PAGE_MODE(bp)) {
2999 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
3000 BNXT_RX_PAGE_SIZE, bp->rx_dir,
3001 DMA_ATTR_WEAK_ORDERING);
3002 rx_agg_buf->page = NULL;
3003 __clear_bit(i, rxr->rx_agg_bmap);
c0c050c5 3004
9a6aa350
AG
3005 page_pool_recycle_direct(rxr->page_pool, page);
3006 } else {
3007 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
3008 BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
3009 DMA_ATTR_WEAK_ORDERING);
3010 rx_agg_buf->page = NULL;
3011 __clear_bit(i, rxr->rx_agg_bmap);
c0c050c5 3012
9a6aa350
AG
3013 __free_page(page);
3014 }
975bc99a 3015 }
1affc01f
EP
3016
3017skip_rx_agg_free:
975bc99a
MC
3018 if (rxr->rx_page) {
3019 __free_page(rxr->rx_page);
3020 rxr->rx_page = NULL;
c0c050c5 3021 }
975bc99a
MC
3022 map = rxr->rx_tpa_idx_map;
3023 if (map)
3024 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3025}
3026
3027static void bnxt_free_rx_skbs(struct bnxt *bp)
3028{
3029 int i;
3030
3031 if (!bp->rx_ring)
3032 return;
3033
3034 for (i = 0; i < bp->rx_nr_rings; i++)
3035 bnxt_free_one_rx_ring_skbs(bp, i);
c0c050c5
MC
3036}
3037
3038static void bnxt_free_skbs(struct bnxt *bp)
3039{
3040 bnxt_free_tx_skbs(bp);
3041 bnxt_free_rx_skbs(bp);
3042}
3043
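/* Initialize a block of context memory with the firmware-specified init
 * value: either fill the whole block, or, when a valid offset is given,
 * write the value at that offset within every mem_init->size stride.
 */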
41435c39
MC
3044static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
3045{
3046 u8 init_val = mem_init->init_val;
3047 u16 offset = mem_init->offset;
3048 u8 *p2 = p;
3049 int i;
3050
3051 if (!init_val)
3052 return;
3053 if (offset == BNXT_MEM_INVALID_OFFSET) {
3054 memset(p, init_val, len);
3055 return;
3056 }
3057 for (i = 0; i < len; i += mem_init->size)
3058 *(p2 + i + offset) = init_val;
3059}
3060
6fe19886 3061static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5
MC
3062{
3063 struct pci_dev *pdev = bp->pdev;
3064 int i;
3065
985941e1
MC
3066 if (!rmem->pg_arr)
3067 goto skip_pages;
3068
6fe19886
MC
3069 for (i = 0; i < rmem->nr_pages; i++) {
3070 if (!rmem->pg_arr[i])
c0c050c5
MC
3071 continue;
3072
6fe19886
MC
3073 dma_free_coherent(&pdev->dev, rmem->page_size,
3074 rmem->pg_arr[i], rmem->dma_arr[i]);
c0c050c5 3075
6fe19886 3076 rmem->pg_arr[i] = NULL;
c0c050c5 3077 }
985941e1 3078skip_pages:
6fe19886 3079 if (rmem->pg_tbl) {
4f49b2b8
MC
3080 size_t pg_tbl_size = rmem->nr_pages * 8;
3081
3082 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3083 pg_tbl_size = rmem->page_size;
3084 dma_free_coherent(&pdev->dev, pg_tbl_size,
6fe19886
MC
3085 rmem->pg_tbl, rmem->pg_tbl_map);
3086 rmem->pg_tbl = NULL;
c0c050c5 3087 }
6fe19886
MC
3088 if (rmem->vmem_size && *rmem->vmem) {
3089 vfree(*rmem->vmem);
3090 *rmem->vmem = NULL;
c0c050c5
MC
3091 }
3092}
3093
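/* Allocate the backing store for one ring: coherent DMA pages for the
 * entries, an optional page table when the ring spans multiple pages or
 * uses an indirection level (with PTU PTE valid/next-to-last/last bits
 * set for rings whose page table is walked by the chip), and an optional
 * vzalloc'ed area for per-entry software state.
 */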
6fe19886 3094static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5 3095{
c0c050c5 3096 struct pci_dev *pdev = bp->pdev;
66cca20a 3097 u64 valid_bit = 0;
6fe19886 3098 int i;
c0c050c5 3099
66cca20a
MC
3100 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3101 valid_bit = PTU_PTE_VALID;
4f49b2b8
MC
3102 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3103 size_t pg_tbl_size = rmem->nr_pages * 8;
3104
3105 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3106 pg_tbl_size = rmem->page_size;
3107 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
6fe19886 3108 &rmem->pg_tbl_map,
c0c050c5 3109 GFP_KERNEL);
6fe19886 3110 if (!rmem->pg_tbl)
c0c050c5
MC
3111 return -ENOMEM;
3112 }
3113
6fe19886 3114 for (i = 0; i < rmem->nr_pages; i++) {
66cca20a
MC
3115 u64 extra_bits = valid_bit;
3116
6fe19886
MC
3117 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3118 rmem->page_size,
3119 &rmem->dma_arr[i],
c0c050c5 3120 GFP_KERNEL);
6fe19886 3121 if (!rmem->pg_arr[i])
c0c050c5
MC
3122 return -ENOMEM;
3123
41435c39
MC
3124 if (rmem->mem_init)
3125 bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
3126 rmem->page_size);
4f49b2b8 3127 if (rmem->nr_pages > 1 || rmem->depth > 0) {
66cca20a
MC
3128 if (i == rmem->nr_pages - 2 &&
3129 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3130 extra_bits |= PTU_PTE_NEXT_TO_LAST;
3131 else if (i == rmem->nr_pages - 1 &&
3132 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3133 extra_bits |= PTU_PTE_LAST;
3134 rmem->pg_tbl[i] =
3135 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3136 }
c0c050c5
MC
3137 }
3138
6fe19886
MC
3139 if (rmem->vmem_size) {
3140 *rmem->vmem = vzalloc(rmem->vmem_size);
3141 if (!(*rmem->vmem))
c0c050c5
MC
3142 return -ENOMEM;
3143 }
3144 return 0;
3145}
3146
4a228a3a
MC
3147static void bnxt_free_tpa_info(struct bnxt *bp)
3148{
3149 int i;
3150
3151 for (i = 0; i < bp->rx_nr_rings; i++) {
3152 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3153
ec4d8e7c
MC
3154 kfree(rxr->rx_tpa_idx_map);
3155 rxr->rx_tpa_idx_map = NULL;
79632e9b
MC
3156 if (rxr->rx_tpa) {
3157 kfree(rxr->rx_tpa[0].agg_arr);
3158 rxr->rx_tpa[0].agg_arr = NULL;
3159 }
4a228a3a
MC
3160 kfree(rxr->rx_tpa);
3161 rxr->rx_tpa = NULL;
3162 }
3163}
3164
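/* Allocate per-ring TPA state: bp->max_tpa aggregation contexts per RX
 * ring and, on P5 chips, one shared aggregation-completion array carved
 * into MAX_SKB_FRAGS-sized slices per context plus the agg-ID lookup map.
 */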
3165static int bnxt_alloc_tpa_info(struct bnxt *bp)
3166{
79632e9b
MC
3167 int i, j, total_aggs = 0;
3168
3169 bp->max_tpa = MAX_TPA;
3170 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3171 if (!bp->max_tpa_v2)
3172 return 0;
3173 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3174 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
3175 }
4a228a3a
MC
3176
3177 for (i = 0; i < bp->rx_nr_rings; i++) {
3178 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
79632e9b 3179 struct rx_agg_cmp *agg;
4a228a3a 3180
79632e9b 3181 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
4a228a3a
MC
3182 GFP_KERNEL);
3183 if (!rxr->rx_tpa)
3184 return -ENOMEM;
79632e9b
MC
3185
3186 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3187 continue;
3188 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
3189 rxr->rx_tpa[0].agg_arr = agg;
3190 if (!agg)
3191 return -ENOMEM;
3192 for (j = 1; j < bp->max_tpa; j++)
3193 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
ec4d8e7c
MC
3194 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3195 GFP_KERNEL);
3196 if (!rxr->rx_tpa_idx_map)
3197 return -ENOMEM;
4a228a3a
MC
3198 }
3199 return 0;
3200}
3201
c0c050c5
MC
3202static void bnxt_free_rx_rings(struct bnxt *bp)
3203{
3204 int i;
3205
b6ab4b01 3206 if (!bp->rx_ring)
c0c050c5
MC
3207 return;
3208
4a228a3a 3209 bnxt_free_tpa_info(bp);
c0c050c5 3210 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 3211 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
3212 struct bnxt_ring_struct *ring;
3213
c6d30e83
MC
3214 if (rxr->xdp_prog)
3215 bpf_prog_put(rxr->xdp_prog);
3216
96a8604f
JDB
3217 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3218 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3219
12479f62 3220 page_pool_destroy(rxr->page_pool);
322b87ca
AG
3221 rxr->page_pool = NULL;
3222
c0c050c5
MC
3223 kfree(rxr->rx_agg_bmap);
3224 rxr->rx_agg_bmap = NULL;
3225
3226 ring = &rxr->rx_ring_struct;
6fe19886 3227 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
3228
3229 ring = &rxr->rx_agg_ring_struct;
6fe19886 3230 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
3231 }
3232}
3233
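/* One page_pool per RX ring, sized to the RX ring and allocated NUMA-local
 * to the device; the caller registers it as the memory model for the
 * ring's xdp_rxq.
 */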
322b87ca
AG
3234static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3235 struct bnxt_rx_ring_info *rxr)
3236{
3237 struct page_pool_params pp = { 0 };
3238
3239 pp.pool_size = bp->rx_ring_size;
3240 pp.nid = dev_to_node(&bp->pdev->dev);
3241 pp.dev = &bp->pdev->dev;
3242 pp.dma_dir = DMA_BIDIRECTIONAL;
3243
3244 rxr->page_pool = page_pool_create(&pp);
3245 if (IS_ERR(rxr->page_pool)) {
3246 int err = PTR_ERR(rxr->page_pool);
3247
3248 rxr->page_pool = NULL;
3249 return err;
3250 }
3251 return 0;
3252}
3253
c0c050c5
MC
3254static int bnxt_alloc_rx_rings(struct bnxt *bp)
3255{
4a228a3a 3256 int i, rc = 0, agg_rings = 0;
c0c050c5 3257
b6ab4b01
MC
3258 if (!bp->rx_ring)
3259 return -ENOMEM;
3260
c0c050c5
MC
3261 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3262 agg_rings = 1;
3263
c0c050c5 3264 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 3265 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
3266 struct bnxt_ring_struct *ring;
3267
c0c050c5
MC
3268 ring = &rxr->rx_ring_struct;
3269
322b87ca
AG
3270 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3271 if (rc)
3272 return rc;
3273
b02e5a0e 3274 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
12479f62 3275 if (rc < 0)
96a8604f
JDB
3276 return rc;
3277
f18c2b77 3278 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
322b87ca
AG
3279 MEM_TYPE_PAGE_POOL,
3280 rxr->page_pool);
f18c2b77
AG
3281 if (rc) {
3282 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3283 return rc;
3284 }
3285
6fe19886 3286 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3287 if (rc)
3288 return rc;
3289
2c61d211 3290 ring->grp_idx = i;
c0c050c5
MC
3291 if (agg_rings) {
3292 u16 mem_size;
3293
3294 ring = &rxr->rx_agg_ring_struct;
6fe19886 3295 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3296 if (rc)
3297 return rc;
3298
9899bb59 3299 ring->grp_idx = i;
c0c050c5
MC
3300 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3301 mem_size = rxr->rx_agg_bmap_size / 8;
3302 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3303 if (!rxr->rx_agg_bmap)
3304 return -ENOMEM;
c0c050c5
MC
3305 }
3306 }
4a228a3a
MC
3307 if (bp->flags & BNXT_FLAG_TPA)
3308 rc = bnxt_alloc_tpa_info(bp);
3309 return rc;
c0c050c5
MC
3310}
3311
3312static void bnxt_free_tx_rings(struct bnxt *bp)
3313{
3314 int i;
3315 struct pci_dev *pdev = bp->pdev;
3316
b6ab4b01 3317 if (!bp->tx_ring)
c0c050c5
MC
3318 return;
3319
3320 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3321 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
3322 struct bnxt_ring_struct *ring;
3323
c0c050c5
MC
3324 if (txr->tx_push) {
3325 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3326 txr->tx_push, txr->tx_push_mapping);
3327 txr->tx_push = NULL;
3328 }
3329
3330 ring = &txr->tx_ring_struct;
3331
6fe19886 3332 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
3333 }
3334}
3335
static int bnxt_alloc_tx_rings(struct bnxt *bp)
{
	int i, j, rc;
	struct pci_dev *pdev = bp->pdev;

	bp->tx_push_size = 0;
	if (bp->tx_push_thresh) {
		int push_size;

		push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
					    bp->tx_push_thresh);

		if (push_size > 256) {
			push_size = 0;
			bp->tx_push_thresh = 0;
		}

		bp->tx_push_size = push_size;
	}

	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;
		u8 qidx;

		ring = &txr->tx_ring_struct;

		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			return rc;

		ring->grp_idx = txr->bnapi->index;
		if (bp->tx_push_size) {
			dma_addr_t mapping;

			/* One pre-allocated DMA buffer to backup
			 * TX push operation
			 */
			txr->tx_push = dma_alloc_coherent(&pdev->dev,
						bp->tx_push_size,
						&txr->tx_push_mapping,
						GFP_KERNEL);

			if (!txr->tx_push)
				return -ENOMEM;

			mapping = txr->tx_push_mapping +
				sizeof(struct tx_push_bd);
			txr->data_mapping = cpu_to_le64(mapping);
		}
		qidx = bp->tc_to_qidx[j];
		ring->queue_id = bp->q_info[qidx].queue_id;
		spin_lock_init(&txr->xdp_tx_lock);
		if (i < bp->tx_nr_rings_xdp)
			continue;
		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
			j++;
	}
	return 0;
}

static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
{
	struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

	kfree(cpr->cp_desc_ring);
	cpr->cp_desc_ring = NULL;
	ring->ring_mem.pg_arr = NULL;
	kfree(cpr->cp_desc_mapping);
	cpr->cp_desc_mapping = NULL;
	ring->ring_mem.dma_arr = NULL;
}

static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
{
	cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
	if (!cpr->cp_desc_ring)
		return -ENOMEM;
	cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
				       GFP_KERNEL);
	if (!cpr->cp_desc_mapping)
		return -ENOMEM;
	return 0;
}

static void bnxt_free_all_cp_arrays(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		if (!bnapi)
			continue;
		bnxt_free_cp_arrays(&bnapi->cp_ring);
	}
}

static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
{
	int i, n = bp->cp_nr_pages;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		int rc;

		if (!bnapi)
			continue;
		rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
		if (rc)
			return rc;
	}
	return 0;
}

static void bnxt_free_cp_rings(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;
		int j;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;

		bnxt_free_ring(bp, &ring->ring_mem);

		for (j = 0; j < 2; j++) {
			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];

			if (cpr2) {
				ring = &cpr2->cp_ring_struct;
				bnxt_free_ring(bp, &ring->ring_mem);
				bnxt_free_cp_arrays(cpr2);
				kfree(cpr2);
				cpr->cp_ring_arr[j] = NULL;
			}
		}
	}
}

static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
{
	struct bnxt_ring_mem_info *rmem;
	struct bnxt_ring_struct *ring;
	struct bnxt_cp_ring_info *cpr;
	int rc;

	cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
	if (!cpr)
		return NULL;

	rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
	if (rc) {
		bnxt_free_cp_arrays(cpr);
		kfree(cpr);
		return NULL;
	}
	ring = &cpr->cp_ring_struct;
	rmem = &ring->ring_mem;
	rmem->nr_pages = bp->cp_nr_pages;
	rmem->page_size = HW_CMPD_RING_SIZE;
	rmem->pg_arr = (void **)cpr->cp_desc_ring;
	rmem->dma_arr = cpr->cp_desc_mapping;
	rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
	rc = bnxt_alloc_ring(bp, rmem);
	if (rc) {
		bnxt_free_ring(bp, rmem);
		bnxt_free_cp_arrays(cpr);
		kfree(cpr);
		cpr = NULL;
	}
	return cpr;
}

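/* Allocate the completion rings.  Each NAPI instance gets a base completion
 * ring whose IRQ map index is shifted past any MSI-X vectors reserved for
 * the ULP driver.  On P5 chips, separate RX and TX completion sub-rings are
 * then allocated per NAPI as needed.
 */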
static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
	bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
	int i, rc, ulp_base_vec, ulp_msix;

	ulp_msix = bnxt_get_ulp_msix_num(bp);
	ulp_base_vec = bnxt_get_ulp_msix_base(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		cpr->bnapi = bnapi;
		ring = &cpr->cp_ring_struct;

		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			return rc;

		if (ulp_msix && i >= ulp_base_vec)
			ring->map_idx = i + ulp_msix;
		else
			ring->map_idx = i;

		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			continue;

		if (i < bp->rx_nr_rings) {
			struct bnxt_cp_ring_info *cpr2 =
				bnxt_alloc_cp_sub_ring(bp);

			cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
			if (!cpr2)
				return -ENOMEM;
			cpr2->bnapi = bnapi;
		}
		if ((sh && i < bp->tx_nr_rings) ||
		    (!sh && i >= bp->rx_nr_rings)) {
			struct bnxt_cp_ring_info *cpr2 =
				bnxt_alloc_cp_sub_ring(bp);

			cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
			if (!cpr2)
				return -ENOMEM;
			cpr2->bnapi = bnapi;
		}
	}
	return 0;
}

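/* Point each ring's ring_mem descriptor (page count, page size, page and DMA
 * address arrays, and software ring area) at the completion, RX, RX
 * aggregation and TX rings owned by every NAPI instance.
 */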
static void bnxt_init_ring_struct(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_ring_mem_info *rmem;
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->cp_nr_pages;
		rmem->page_size = HW_CMPD_RING_SIZE;
		rmem->pg_arr = (void **)cpr->cp_desc_ring;
		rmem->dma_arr = cpr->cp_desc_mapping;
		rmem->vmem_size = 0;

		rxr = bnapi->rx_ring;
		if (!rxr)
			goto skip_rx;

		ring = &rxr->rx_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->rx_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)rxr->rx_desc_ring;
		rmem->dma_arr = rxr->rx_desc_mapping;
		rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
		rmem->vmem = (void **)&rxr->rx_buf_ring;

		ring = &rxr->rx_agg_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->rx_agg_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
		rmem->dma_arr = rxr->rx_agg_desc_mapping;
		rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
		rmem->vmem = (void **)&rxr->rx_agg_ring;

skip_rx:
		txr = bnapi->tx_ring;
		if (!txr)
			continue;

		ring = &txr->tx_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->tx_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)txr->tx_desc_ring;
		rmem->dma_arr = txr->tx_desc_mapping;
		rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
		rmem->vmem = (void **)&txr->tx_buf_ring;
	}
}

static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
{
	int i;
	u32 prod;
	struct rx_bd **rx_buf_ring;

	rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
		int j;
		struct rx_bd *rxbd;

		rxbd = rx_buf_ring[i];
		if (!rxbd)
			continue;

		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
			rxbd->rx_bd_opaque = prod;
		}
	}
}

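/* Fill one RX ring with receive buffers, the aggregation ring with pages
 * (when aggregation is enabled), and the per-ring TPA array with fragment
 * buffers.  A ring that cannot be completely filled only generates a
 * warning; a TPA buffer allocation failure returns an error.
 */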
static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
	struct net_device *dev = bp->dev;
	u32 prod;
	int i;

	prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_nr, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX(prod);
	}
	rxr->rx_prod = prod;

	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
		return 0;

	prod = rxr->rx_agg_prod;
	for (i = 0; i < bp->rx_agg_ring_size; i++) {
		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
				    ring_nr, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;

	if (rxr->rx_tpa) {
		dma_addr_t mapping;
		u8 *data;

		for (i = 0; i < bp->max_tpa; i++) {
			data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
			if (!data)
				return -ENOMEM;

			rxr->rx_tpa[i].data = data;
			rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
			rxr->rx_tpa[i].mapping = mapping;
		}
	}
	return 0;
}

static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring_struct *ring;
	u32 type;

	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;

	if (NET_IP_ALIGN == 2)
		type |= RX_BD_FLAGS_SOP;

	rxr = &bp->rx_ring[ring_nr];
	ring = &rxr->rx_ring_struct;
	bnxt_init_rxbd_pages(ring, type);

	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
		bpf_prog_add(bp->xdp_prog, 1);
		rxr->xdp_prog = bp->xdp_prog;
	}
	ring->fw_ring_id = INVALID_HW_RING_ID;

	ring = &rxr->rx_agg_ring_struct;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
		type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
			RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;

		bnxt_init_rxbd_pages(ring, type);
	}

	return bnxt_alloc_one_rx_ring(bp, ring_nr);
}

static void bnxt_init_cp_rings(struct bnxt *bp)
{
	int i, j;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
		for (j = 0; j < 2; j++) {
			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];

			if (!cpr2)
				continue;

			ring = &cpr2->cp_ring_struct;
			ring->fw_ring_id = INVALID_HW_RING_ID;
			cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
			cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
		}
	}
}

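/* Choose the RX buffer offsets: page mode (XDP) reserves XDP_PACKET_HEADROOM
 * in front of the packet data, while normal mode uses the default
 * NET_SKB_PAD based offsets, then fill every RX ring.
 */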
static int bnxt_init_rx_rings(struct bnxt *bp)
{
	int i, rc = 0;

	if (BNXT_RX_PAGE_MODE(bp)) {
		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
	} else {
		bp->rx_offset = BNXT_RX_OFFSET;
		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		rc = bnxt_init_one_rx_ring(bp, i);
		if (rc)
			break;
	}

	return rc;
}

static int bnxt_init_tx_rings(struct bnxt *bp)
{
	u16 i;

	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
				   BNXT_MIN_TX_DESC_CNT);

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
	}

	return 0;
}

static void bnxt_free_ring_grps(struct bnxt *bp)
{
	kfree(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
{
	int i;

	if (irq_re_init) {
		bp->grp_info = kcalloc(bp->cp_nr_rings,
				       sizeof(struct bnxt_ring_grp_info),
				       GFP_KERNEL);
		if (!bp->grp_info)
			return -ENOMEM;
	}
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (irq_re_init)
			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
	}
	return 0;
}

static void bnxt_free_vnics(struct bnxt *bp)
{
	kfree(bp->vnic_info);
	bp->vnic_info = NULL;
	bp->nr_vnics = 0;
}

static int bnxt_alloc_vnics(struct bnxt *bp)
{
	int num_vnics = 1;

#ifdef CONFIG_RFS_ACCEL
	if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
		num_vnics += bp->rx_nr_rings;
#endif

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		num_vnics++;

	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
				GFP_KERNEL);
	if (!bp->vnic_info)
		return -ENOMEM;

	bp->nr_vnics = num_vnics;
	return 0;
}

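/* Reset every VNIC's firmware IDs and seed the RSS hash keys: VNIC 0 gets a
 * random key and all other VNICs reuse that same key.
 */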
static void bnxt_init_vnics(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
		int j;

		vnic->fw_vnic_id = INVALID_HW_RING_ID;
		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;

		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;

		if (bp->vnic_info[i].rss_hash_key) {
			if (i == 0)
				get_random_bytes(vnic->rss_hash_key,
						 HW_HASH_KEY_SIZE);
			else
				memcpy(vnic->rss_hash_key,
				       bp->vnic_info[0].rss_hash_key,
				       HW_HASH_KEY_SIZE);
		}
	}
}

static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
{
	int pages;

	pages = ring_size / desc_per_pg;

	if (!pages)
		return 1;

	pages++;

	while (pages & (pages - 1))
		pages++;

	return pages;
}

c6d30e83 3906void bnxt_set_tpa_flags(struct bnxt *bp)
c0c050c5
MC
3907{
3908 bp->flags &= ~BNXT_FLAG_TPA;
341138c3
MC
3909 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3910 return;
c0c050c5
MC
3911 if (bp->dev->features & NETIF_F_LRO)
3912 bp->flags |= BNXT_FLAG_LRO;
1054aee8 3913 else if (bp->dev->features & NETIF_F_GRO_HW)
c0c050c5
MC
3914 bp->flags |= BNXT_FLAG_GRO;
3915}
3916
3917/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3918 * be set on entry.
3919 */
3920void bnxt_set_ring_params(struct bnxt *bp)
3921{
27640ce6 3922 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
c0c050c5
MC
3923 u32 agg_factor = 0, agg_ring_size = 0;
3924
3925 /* 8 for CRC and VLAN */
3926 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3927
32861236 3928 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
c0c050c5
MC
3929 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3930
3931 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3932 ring_size = bp->rx_ring_size;
3933 bp->rx_agg_ring_size = 0;
3934 bp->rx_agg_nr_pages = 0;
3935
3936 if (bp->flags & BNXT_FLAG_TPA)
2839f28b 3937 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
c0c050c5
MC
3938
3939 bp->flags &= ~BNXT_FLAG_JUMBO;
bdbd1eb5 3940 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
c0c050c5
MC
3941 u32 jumbo_factor;
3942
3943 bp->flags |= BNXT_FLAG_JUMBO;
3944 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3945 if (jumbo_factor > agg_factor)
3946 agg_factor = jumbo_factor;
3947 }
c1129b51
MC
3948 if (agg_factor) {
3949 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
3950 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
3951 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
3952 bp->rx_ring_size, ring_size);
3953 bp->rx_ring_size = ring_size;
3954 }
3955 agg_ring_size = ring_size * agg_factor;
c0c050c5 3956
c0c050c5
MC
3957 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3958 RX_DESC_CNT);
3959 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3960 u32 tmp = agg_ring_size;
3961
3962 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3963 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3964 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3965 tmp, agg_ring_size);
3966 }
3967 bp->rx_agg_ring_size = agg_ring_size;
3968 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
32861236
AG
3969
3970 if (BNXT_RX_PAGE_MODE(bp)) {
1abeacc1
MC
3971 rx_space = PAGE_SIZE;
3972 rx_size = PAGE_SIZE -
3973 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
3974 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
32861236
AG
3975 } else {
3976 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3977 rx_space = rx_size + NET_SKB_PAD +
3978 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3979 }
c0c050c5
MC
3980 }
3981
3982 bp->rx_buf_use_size = rx_size;
3983 bp->rx_buf_size = rx_space;
3984
3985 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3986 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3987
3988 ring_size = bp->tx_ring_size;
3989 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3990 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3991
27640ce6
MC
3992 max_rx_cmpl = bp->rx_ring_size;
3993 /* MAX TPA needs to be added because TPA_START completions are
3994 * immediately recycled, so the TPA completions are not bound by
3995 * the RX ring size.
3996 */
3997 if (bp->flags & BNXT_FLAG_TPA)
3998 max_rx_cmpl += bp->max_tpa;
3999 /* RX and TPA completions are 32-byte, all others are 16-byte */
4000 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
c0c050c5
MC
4001 bp->cp_ring_size = ring_size;
4002
4003 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4004 if (bp->cp_nr_pages > MAX_CP_PAGES) {
4005 bp->cp_nr_pages = MAX_CP_PAGES;
4006 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4007 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4008 ring_size, bp->cp_ring_size);
4009 }
4010 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4011 bp->cp_ring_mask = bp->cp_bit - 1;
4012}
4013
96a8604f
JDB
4014/* Changing allocation mode of RX rings.
4015 * TODO: Update when extending xdp_rxq_info to support allocation modes.
4016 */
c61fb99c 4017int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
6bb19474 4018{
c61fb99c 4019 if (page_mode) {
c61fb99c 4020 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
1dc4c557
AG
4021 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4022
4023 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4024 bp->flags |= BNXT_FLAG_JUMBO;
4025 bp->rx_skb_func = bnxt_rx_multi_page_skb;
4026 bp->dev->max_mtu =
4027 min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4028 } else {
4029 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4030 bp->rx_skb_func = bnxt_rx_page_skb;
4031 bp->dev->max_mtu =
4032 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4033 }
c61fb99c 4034 bp->rx_dir = DMA_BIDIRECTIONAL;
1054aee8
MC
4035 /* Disable LRO or GRO_HW */
4036 netdev_update_features(bp->dev);
c61fb99c 4037 } else {
7eb9bb3a 4038 bp->dev->max_mtu = bp->max_mtu;
c61fb99c
MC
4039 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4040 bp->rx_dir = DMA_FROM_DEVICE;
4041 bp->rx_skb_func = bnxt_rx_skb;
4042 }
6bb19474
MC
4043 return 0;
4044}
4045
c0c050c5
MC
4046static void bnxt_free_vnic_attributes(struct bnxt *bp)
4047{
4048 int i;
4049 struct bnxt_vnic_info *vnic;
4050 struct pci_dev *pdev = bp->pdev;
4051
4052 if (!bp->vnic_info)
4053 return;
4054
4055 for (i = 0; i < bp->nr_vnics; i++) {
4056 vnic = &bp->vnic_info[i];
4057
4058 kfree(vnic->fw_grp_ids);
4059 vnic->fw_grp_ids = NULL;
4060
4061 kfree(vnic->uc_list);
4062 vnic->uc_list = NULL;
4063
4064 if (vnic->mc_list) {
4065 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4066 vnic->mc_list, vnic->mc_list_mapping);
4067 vnic->mc_list = NULL;
4068 }
4069
4070 if (vnic->rss_table) {
34370d24 4071 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
c0c050c5
MC
4072 vnic->rss_table,
4073 vnic->rss_table_dma_addr);
4074 vnic->rss_table = NULL;
4075 }
4076
4077 vnic->rss_hash_key = NULL;
4078 vnic->flags = 0;
4079 }
4080}
4081
4082static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4083{
4084 int i, rc = 0, size;
4085 struct bnxt_vnic_info *vnic;
4086 struct pci_dev *pdev = bp->pdev;
4087 int max_rings;
4088
4089 for (i = 0; i < bp->nr_vnics; i++) {
4090 vnic = &bp->vnic_info[i];
4091
4092 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4093 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4094
4095 if (mem_size > 0) {
4096 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4097 if (!vnic->uc_list) {
4098 rc = -ENOMEM;
4099 goto out;
4100 }
4101 }
4102 }
4103
4104 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4105 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4106 vnic->mc_list =
4107 dma_alloc_coherent(&pdev->dev,
4108 vnic->mc_list_size,
4109 &vnic->mc_list_mapping,
4110 GFP_KERNEL);
4111 if (!vnic->mc_list) {
4112 rc = -ENOMEM;
4113 goto out;
4114 }
4115 }
4116
44c6f72a
MC
4117 if (bp->flags & BNXT_FLAG_CHIP_P5)
4118 goto vnic_skip_grps;
4119
c0c050c5
MC
4120 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4121 max_rings = bp->rx_nr_rings;
4122 else
4123 max_rings = 1;
4124
4125 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4126 if (!vnic->fw_grp_ids) {
4127 rc = -ENOMEM;
4128 goto out;
4129 }
44c6f72a 4130vnic_skip_grps:
ae10ae74
MC
4131 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
4132 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4133 continue;
4134
c0c050c5 4135 /* Allocate rss table and hash key */
34370d24
MC
4136 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
4137 if (bp->flags & BNXT_FLAG_CHIP_P5)
4138 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
4139
4140 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
4141 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
4142 vnic->rss_table_size,
c0c050c5
MC
4143 &vnic->rss_table_dma_addr,
4144 GFP_KERNEL);
4145 if (!vnic->rss_table) {
4146 rc = -ENOMEM;
4147 goto out;
4148 }
4149
c0c050c5
MC
4150 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4151 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4152 }
4153 return 0;
4154
4155out:
4156 return rc;
4157}
4158
4159static void bnxt_free_hwrm_resources(struct bnxt *bp)
4160{
68f684e2
EP
4161 struct bnxt_hwrm_wait_token *token;
4162
f9ff5782
EP
4163 dma_pool_destroy(bp->hwrm_dma_pool);
4164 bp->hwrm_dma_pool = NULL;
68f684e2
EP
4165
4166 rcu_read_lock();
4167 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4168 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4169 rcu_read_unlock();
c0c050c5
MC
4170}
4171
4172static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4173{
b34695a8 4174 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
f9ff5782
EP
4175 BNXT_HWRM_DMA_SIZE,
4176 BNXT_HWRM_DMA_ALIGN, 0);
4177 if (!bp->hwrm_dma_pool)
e605db80
DK
4178 return -ENOMEM;
4179
68f684e2
EP
4180 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4181
e605db80
DK
4182 return 0;
4183}
4184
177a6cde 4185static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
c0c050c5 4186{
a37120b2
MC
4187 kfree(stats->hw_masks);
4188 stats->hw_masks = NULL;
4189 kfree(stats->sw_stats);
4190 stats->sw_stats = NULL;
177a6cde
MC
4191 if (stats->hw_stats) {
4192 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4193 stats->hw_stats_map);
4194 stats->hw_stats = NULL;
4195 }
4196}
c0c050c5 4197
a37120b2
MC
4198static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4199 bool alloc_masks)
177a6cde
MC
4200{
4201 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4202 &stats->hw_stats_map, GFP_KERNEL);
4203 if (!stats->hw_stats)
4204 return -ENOMEM;
00db3cba 4205
a37120b2
MC
4206 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4207 if (!stats->sw_stats)
4208 goto stats_mem_err;
4209
4210 if (alloc_masks) {
4211 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4212 if (!stats->hw_masks)
4213 goto stats_mem_err;
4214 }
177a6cde 4215 return 0;
a37120b2
MC
4216
4217stats_mem_err:
4218 bnxt_free_stats_mem(bp, stats);
4219 return -ENOMEM;
177a6cde 4220}
00db3cba 4221
d752d053
MC
4222static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4223{
4224 int i;
4225
4226 for (i = 0; i < count; i++)
4227 mask_arr[i] = mask;
4228}
4229
4230static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4231{
4232 int i;
4233
4234 for (i = 0; i < count; i++)
4235 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4236}
4237
4238static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4239 struct bnxt_stats_mem *stats)
4240{
bbf33d1d
EP
4241 struct hwrm_func_qstats_ext_output *resp;
4242 struct hwrm_func_qstats_ext_input *req;
d752d053
MC
4243 __le64 *hw_masks;
4244 int rc;
4245
4246 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4247 !(bp->flags & BNXT_FLAG_CHIP_P5))
4248 return -EOPNOTSUPP;
4249
bbf33d1d 4250 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
d752d053 4251 if (rc)
bbf33d1d 4252 return rc;
d752d053 4253
bbf33d1d
EP
4254 req->fid = cpu_to_le16(0xffff);
4255 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
d752d053 4256
bbf33d1d
EP
4257 resp = hwrm_req_hold(bp, req);
4258 rc = hwrm_req_send(bp, req);
4259 if (!rc) {
4260 hw_masks = &resp->rx_ucast_pkts;
4261 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4262 }
4263 hwrm_req_drop(bp, req);
d752d053
MC
4264 return rc;
4265}
4266
531d1d26
MC
4267static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4268static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4269
d752d053
MC
4270static void bnxt_init_stats(struct bnxt *bp)
4271{
4272 struct bnxt_napi *bnapi = bp->bnapi[0];
4273 struct bnxt_cp_ring_info *cpr;
4274 struct bnxt_stats_mem *stats;
531d1d26
MC
4275 __le64 *rx_stats, *tx_stats;
4276 int rc, rx_count, tx_count;
4277 u64 *rx_masks, *tx_masks;
d752d053 4278 u64 mask;
531d1d26 4279 u8 flags;
d752d053
MC
4280
4281 cpr = &bnapi->cp_ring;
4282 stats = &cpr->stats;
4283 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4284 if (rc) {
4285 if (bp->flags & BNXT_FLAG_CHIP_P5)
4286 mask = (1ULL << 48) - 1;
4287 else
4288 mask = -1ULL;
4289 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4290 }
531d1d26
MC
4291 if (bp->flags & BNXT_FLAG_PORT_STATS) {
4292 stats = &bp->port_stats;
4293 rx_stats = stats->hw_stats;
4294 rx_masks = stats->hw_masks;
4295 rx_count = sizeof(struct rx_port_stats) / 8;
4296 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4297 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4298 tx_count = sizeof(struct tx_port_stats) / 8;
4299
4300 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4301 rc = bnxt_hwrm_port_qstats(bp, flags);
4302 if (rc) {
4303 mask = (1ULL << 40) - 1;
4304
4305 bnxt_fill_masks(rx_masks, mask, rx_count);
4306 bnxt_fill_masks(tx_masks, mask, tx_count);
4307 } else {
4308 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4309 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4310 bnxt_hwrm_port_qstats(bp, 0);
4311 }
4312 }
4313 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4314 stats = &bp->rx_port_stats_ext;
4315 rx_stats = stats->hw_stats;
4316 rx_masks = stats->hw_masks;
4317 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4318 stats = &bp->tx_port_stats_ext;
4319 tx_stats = stats->hw_stats;
4320 tx_masks = stats->hw_masks;
4321 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4322
c07fa08f 4323 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
531d1d26
MC
4324 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4325 if (rc) {
4326 mask = (1ULL << 40) - 1;
4327
4328 bnxt_fill_masks(rx_masks, mask, rx_count);
4329 if (tx_stats)
4330 bnxt_fill_masks(tx_masks, mask, tx_count);
4331 } else {
4332 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4333 if (tx_stats)
4334 bnxt_copy_hw_masks(tx_masks, tx_stats,
4335 tx_count);
4336 bnxt_hwrm_port_qstats_ext(bp, 0);
4337 }
4338 }
d752d053
MC
4339}
4340
177a6cde
MC
4341static void bnxt_free_port_stats(struct bnxt *bp)
4342{
4343 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4344 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
36e53349 4345
177a6cde
MC
4346 bnxt_free_stats_mem(bp, &bp->port_stats);
4347 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4348 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
fd3ab1c7
MC
4349}
4350
4351static void bnxt_free_ring_stats(struct bnxt *bp)
4352{
177a6cde 4353 int i;
3bdf56c4 4354
c0c050c5
MC
4355 if (!bp->bnapi)
4356 return;
4357
c0c050c5
MC
4358 for (i = 0; i < bp->cp_nr_rings; i++) {
4359 struct bnxt_napi *bnapi = bp->bnapi[i];
4360 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4361
177a6cde 4362 bnxt_free_stats_mem(bp, &cpr->stats);
c0c050c5
MC
4363 }
4364}
4365
4366static int bnxt_alloc_stats(struct bnxt *bp)
4367{
4368 u32 size, i;
177a6cde 4369 int rc;
c0c050c5 4370
4e748506 4371 size = bp->hw_ring_stats_size;
c0c050c5
MC
4372
4373 for (i = 0; i < bp->cp_nr_rings; i++) {
4374 struct bnxt_napi *bnapi = bp->bnapi[i];
4375 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4376
177a6cde 4377 cpr->stats.len = size;
a37120b2 4378 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
177a6cde
MC
4379 if (rc)
4380 return rc;
c0c050c5
MC
4381
4382 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4383 }
3bdf56c4 4384
a220eabc
VV
4385 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4386 return 0;
fd3ab1c7 4387
177a6cde 4388 if (bp->port_stats.hw_stats)
a220eabc 4389 goto alloc_ext_stats;
3bdf56c4 4390
177a6cde 4391 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
a37120b2 4392 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
177a6cde
MC
4393 if (rc)
4394 return rc;
3bdf56c4 4395
a220eabc 4396 bp->flags |= BNXT_FLAG_PORT_STATS;
00db3cba 4397
fd3ab1c7 4398alloc_ext_stats:
a220eabc
VV
4399 /* Display extended statistics only if FW supports it */
4400 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
6154532f 4401 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
00db3cba
VV
4402 return 0;
4403
177a6cde 4404 if (bp->rx_port_stats_ext.hw_stats)
a220eabc 4405 goto alloc_tx_ext_stats;
fd3ab1c7 4406
177a6cde 4407 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
a37120b2 4408 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
177a6cde
MC
4409 /* Extended stats are optional */
4410 if (rc)
a220eabc 4411 return 0;
00db3cba 4412
fd3ab1c7 4413alloc_tx_ext_stats:
177a6cde 4414 if (bp->tx_port_stats_ext.hw_stats)
dfe64de9 4415 return 0;
fd3ab1c7 4416
6154532f
VV
4417 if (bp->hwrm_spec_code >= 0x10902 ||
4418 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
177a6cde 4419 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
a37120b2 4420 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
177a6cde
MC
4421 /* Extended stats are optional */
4422 if (rc)
4423 return 0;
3bdf56c4 4424 }
a220eabc 4425 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
c0c050c5
MC
4426 return 0;
4427}
4428
4429static void bnxt_clear_ring_indices(struct bnxt *bp)
4430{
4431 int i;
4432
4433 if (!bp->bnapi)
4434 return;
4435
4436 for (i = 0; i < bp->cp_nr_rings; i++) {
4437 struct bnxt_napi *bnapi = bp->bnapi[i];
4438 struct bnxt_cp_ring_info *cpr;
4439 struct bnxt_rx_ring_info *rxr;
4440 struct bnxt_tx_ring_info *txr;
4441
4442 if (!bnapi)
4443 continue;
4444
4445 cpr = &bnapi->cp_ring;
4446 cpr->cp_raw_cons = 0;
4447
b6ab4b01 4448 txr = bnapi->tx_ring;
3b2b7d9d
MC
4449 if (txr) {
4450 txr->tx_prod = 0;
4451 txr->tx_cons = 0;
4452 }
c0c050c5 4453
b6ab4b01 4454 rxr = bnapi->rx_ring;
3b2b7d9d
MC
4455 if (rxr) {
4456 rxr->rx_prod = 0;
4457 rxr->rx_agg_prod = 0;
4458 rxr->rx_sw_agg_prod = 0;
376a5b86 4459 rxr->rx_next_cons = 0;
3b2b7d9d 4460 }
c0c050c5
MC
4461 }
4462}
4463
4464static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4465{
4466#ifdef CONFIG_RFS_ACCEL
4467 int i;
4468
4469 /* Under rtnl_lock and all our NAPIs have been disabled. It's
4470 * safe to delete the hash table.
4471 */
4472 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4473 struct hlist_head *head;
4474 struct hlist_node *tmp;
4475 struct bnxt_ntuple_filter *fltr;
4476
4477 head = &bp->ntp_fltr_hash_tbl[i];
4478 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4479 hlist_del(&fltr->hash);
4480 kfree(fltr);
4481 }
4482 }
4483 if (irq_reinit) {
45262522 4484 bitmap_free(bp->ntp_fltr_bmap);
c0c050c5
MC
4485 bp->ntp_fltr_bmap = NULL;
4486 }
4487 bp->ntp_fltr_count = 0;
4488#endif
4489}
4490
4491static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4492{
4493#ifdef CONFIG_RFS_ACCEL
4494 int i, rc = 0;
4495
4496 if (!(bp->flags & BNXT_FLAG_RFS))
4497 return 0;
4498
4499 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4500 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4501
4502 bp->ntp_fltr_count = 0;
45262522 4503 bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
c0c050c5
MC
4504
4505 if (!bp->ntp_fltr_bmap)
4506 rc = -ENOMEM;
4507
4508 return rc;
4509#else
4510 return 0;
4511#endif
4512}
4513
4514static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4515{
4516 bnxt_free_vnic_attributes(bp);
4517 bnxt_free_tx_rings(bp);
4518 bnxt_free_rx_rings(bp);
4519 bnxt_free_cp_rings(bp);
03c74487 4520 bnxt_free_all_cp_arrays(bp);
c0c050c5
MC
4521 bnxt_free_ntp_fltrs(bp, irq_re_init);
4522 if (irq_re_init) {
fd3ab1c7 4523 bnxt_free_ring_stats(bp);
b0d28207 4524 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
eba93de6 4525 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
fea6b333 4526 bnxt_free_port_stats(bp);
c0c050c5
MC
4527 bnxt_free_ring_grps(bp);
4528 bnxt_free_vnics(bp);
a960dec9
MC
4529 kfree(bp->tx_ring_map);
4530 bp->tx_ring_map = NULL;
b6ab4b01
MC
4531 kfree(bp->tx_ring);
4532 bp->tx_ring = NULL;
4533 kfree(bp->rx_ring);
4534 bp->rx_ring = NULL;
c0c050c5
MC
4535 kfree(bp->bnapi);
4536 bp->bnapi = NULL;
4537 } else {
4538 bnxt_clear_ring_indices(bp);
4539 }
4540}
4541
4542static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4543{
01657bcd 4544 int i, j, rc, size, arr_size;
c0c050c5
MC
4545 void *bnapi;
4546
4547 if (irq_re_init) {
4548 /* Allocate bnapi mem pointer array and mem block for
4549 * all queues
4550 */
4551 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4552 bp->cp_nr_rings);
4553 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4554 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4555 if (!bnapi)
4556 return -ENOMEM;
4557
4558 bp->bnapi = bnapi;
4559 bnapi += arr_size;
4560 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4561 bp->bnapi[i] = bnapi;
4562 bp->bnapi[i]->index = i;
4563 bp->bnapi[i]->bp = bp;
e38287b7
MC
4564 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4565 struct bnxt_cp_ring_info *cpr =
4566 &bp->bnapi[i]->cp_ring;
4567
4568 cpr->cp_ring_struct.ring_mem.flags =
4569 BNXT_RMEM_RING_PTE_FLAG;
4570 }
c0c050c5
MC
4571 }
4572
b6ab4b01
MC
4573 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4574 sizeof(struct bnxt_rx_ring_info),
4575 GFP_KERNEL);
4576 if (!bp->rx_ring)
4577 return -ENOMEM;
4578
4579 for (i = 0; i < bp->rx_nr_rings; i++) {
e38287b7
MC
4580 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4581
4582 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4583 rxr->rx_ring_struct.ring_mem.flags =
4584 BNXT_RMEM_RING_PTE_FLAG;
4585 rxr->rx_agg_ring_struct.ring_mem.flags =
4586 BNXT_RMEM_RING_PTE_FLAG;
4587 }
4588 rxr->bnapi = bp->bnapi[i];
b6ab4b01
MC
4589 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4590 }
4591
4592 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4593 sizeof(struct bnxt_tx_ring_info),
4594 GFP_KERNEL);
4595 if (!bp->tx_ring)
4596 return -ENOMEM;
4597
a960dec9
MC
4598 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4599 GFP_KERNEL);
4600
4601 if (!bp->tx_ring_map)
4602 return -ENOMEM;
4603
01657bcd
MC
4604 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4605 j = 0;
4606 else
4607 j = bp->rx_nr_rings;
4608
4609 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
e38287b7
MC
4610 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4611
4612 if (bp->flags & BNXT_FLAG_CHIP_P5)
4613 txr->tx_ring_struct.ring_mem.flags =
4614 BNXT_RMEM_RING_PTE_FLAG;
4615 txr->bnapi = bp->bnapi[j];
4616 bp->bnapi[j]->tx_ring = txr;
5f449249 4617 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
38413406 4618 if (i >= bp->tx_nr_rings_xdp) {
e38287b7 4619 txr->txq_index = i - bp->tx_nr_rings_xdp;
38413406
MC
4620 bp->bnapi[j]->tx_int = bnxt_tx_int;
4621 } else {
fa3e93e8 4622 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
38413406
MC
4623 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4624 }
b6ab4b01
MC
4625 }
4626
c0c050c5
MC
4627 rc = bnxt_alloc_stats(bp);
4628 if (rc)
4629 goto alloc_mem_err;
d752d053 4630 bnxt_init_stats(bp);
c0c050c5
MC
4631
4632 rc = bnxt_alloc_ntp_fltrs(bp);
4633 if (rc)
4634 goto alloc_mem_err;
4635
4636 rc = bnxt_alloc_vnics(bp);
4637 if (rc)
4638 goto alloc_mem_err;
4639 }
4640
03c74487
MC
4641 rc = bnxt_alloc_all_cp_arrays(bp);
4642 if (rc)
4643 goto alloc_mem_err;
4644
c0c050c5
MC
4645 bnxt_init_ring_struct(bp);
4646
4647 rc = bnxt_alloc_rx_rings(bp);
4648 if (rc)
4649 goto alloc_mem_err;
4650
4651 rc = bnxt_alloc_tx_rings(bp);
4652 if (rc)
4653 goto alloc_mem_err;
4654
4655 rc = bnxt_alloc_cp_rings(bp);
4656 if (rc)
4657 goto alloc_mem_err;
4658
4659 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4660 BNXT_VNIC_UCAST_FLAG;
4661 rc = bnxt_alloc_vnic_attributes(bp);
4662 if (rc)
4663 goto alloc_mem_err;
4664 return 0;
4665
4666alloc_mem_err:
4667 bnxt_free_mem(bp, true);
4668 return rc;
4669}
4670
9d8bc097
MC
4671static void bnxt_disable_int(struct bnxt *bp)
4672{
4673 int i;
4674
4675 if (!bp->bnapi)
4676 return;
4677
4678 for (i = 0; i < bp->cp_nr_rings; i++) {
4679 struct bnxt_napi *bnapi = bp->bnapi[i];
4680 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
daf1f1e7 4681 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9d8bc097 4682
daf1f1e7 4683 if (ring->fw_ring_id != INVALID_HW_RING_ID)
697197e5 4684 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
4685 }
4686}
4687
e5811b8c
MC
4688static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4689{
4690 struct bnxt_napi *bnapi = bp->bnapi[n];
4691 struct bnxt_cp_ring_info *cpr;
4692
4693 cpr = &bnapi->cp_ring;
4694 return cpr->cp_ring_struct.map_idx;
4695}
4696
9d8bc097
MC
4697static void bnxt_disable_int_sync(struct bnxt *bp)
4698{
4699 int i;
4700
38290e37
MC
4701 if (!bp->irq_tbl)
4702 return;
4703
9d8bc097
MC
4704 atomic_inc(&bp->intr_sem);
4705
4706 bnxt_disable_int(bp);
e5811b8c
MC
4707 for (i = 0; i < bp->cp_nr_rings; i++) {
4708 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4709
4710 synchronize_irq(bp->irq_tbl[map_idx].vector);
4711 }
9d8bc097
MC
4712}
4713
4714static void bnxt_enable_int(struct bnxt *bp)
4715{
4716 int i;
4717
4718 atomic_set(&bp->intr_sem, 0);
4719 for (i = 0; i < bp->cp_nr_rings; i++) {
4720 struct bnxt_napi *bnapi = bp->bnapi[i];
4721 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4722
697197e5 4723 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
4724 }
4725}
4726
2e882468
VV
4727int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4728 bool async_only)
c0c050c5 4729{
25be8623
MC
4730 DECLARE_BITMAP(async_events_bmap, 256);
4731 u32 *events = (u32 *)async_events_bmap;
bbf33d1d
EP
4732 struct hwrm_func_drv_rgtr_output *resp;
4733 struct hwrm_func_drv_rgtr_input *req;
acfb50e4 4734 u32 flags;
2e882468 4735 int rc, i;
a1653b13 4736
bbf33d1d
EP
4737 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
4738 if (rc)
4739 return rc;
a1653b13 4740
bbf33d1d
EP
4741 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4742 FUNC_DRV_RGTR_REQ_ENABLES_VER |
4743 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
a1653b13 4744
bbf33d1d 4745 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
8280b38e
VV
4746 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4747 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4748 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
acfb50e4 4749 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
e633a329
VV
4750 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4751 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
bbf33d1d
EP
4752 req->flags = cpu_to_le32(flags);
4753 req->ver_maj_8b = DRV_VER_MAJ;
4754 req->ver_min_8b = DRV_VER_MIN;
4755 req->ver_upd_8b = DRV_VER_UPD;
4756 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
4757 req->ver_min = cpu_to_le16(DRV_VER_MIN);
4758 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
c0c050c5
MC
4759
4760 if (BNXT_PF(bp)) {
9b0436c3 4761 u32 data[8];
a1653b13 4762 int i;
c0c050c5 4763
9b0436c3
MC
4764 memset(data, 0, sizeof(data));
4765 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4766 u16 cmd = bnxt_vf_req_snif[i];
4767 unsigned int bit, idx;
4768
4769 idx = cmd / 32;
4770 bit = cmd % 32;
4771 data[idx] |= 1 << bit;
4772 }
c0c050c5 4773
de68f5de 4774 for (i = 0; i < 8; i++)
bbf33d1d 4775 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
de68f5de 4776
bbf33d1d 4777 req->enables |=
c0c050c5
MC
4778 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4779 }
4780
abd43a13 4781 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
bbf33d1d 4782 req->flags |= cpu_to_le32(
abd43a13
VD
4783 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4784
2e882468
VV
4785 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4786 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4787 u16 event_id = bnxt_async_events_arr[i];
4788
4789 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4790 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4791 continue;
4792 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4793 }
4794 if (bmap && bmap_size) {
4795 for (i = 0; i < bmap_size; i++) {
4796 if (test_bit(i, bmap))
4797 __set_bit(i, async_events_bmap);
4798 }
4799 }
4800 for (i = 0; i < 8; i++)
bbf33d1d 4801 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
2e882468
VV
4802
4803 if (async_only)
bbf33d1d 4804 req->enables =
2e882468
VV
4805 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4806
bbf33d1d
EP
4807 resp = hwrm_req_hold(bp, req);
4808 rc = hwrm_req_send(bp, req);
bdb38602
VV
4809 if (!rc) {
4810 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4811 if (resp->flags &
4812 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4813 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4814 }
bbf33d1d 4815 hwrm_req_drop(bp, req);
25e1acd6 4816 return rc;
c0c050c5
MC
4817}
4818
228ea8c1 4819int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
be58a0da 4820{
bbf33d1d
EP
4821 struct hwrm_func_drv_unrgtr_input *req;
4822 int rc;
be58a0da 4823
bdb38602
VV
4824 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4825 return 0;
4826
bbf33d1d
EP
4827 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
4828 if (rc)
4829 return rc;
4830 return hwrm_req_send(bp, req);
be58a0da
JH
4831}
4832
c0c050c5
MC
4833static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4834{
bbf33d1d
EP
4835 struct hwrm_tunnel_dst_port_free_input *req;
4836 int rc;
c0c050c5 4837
7ae9dc35
MC
4838 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
4839 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
4840 return 0;
4841 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
4842 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
4843 return 0;
4844
bbf33d1d
EP
4845 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
4846 if (rc)
4847 return rc;
4848
4849 req->tunnel_type = tunnel_type;
c0c050c5
MC
4850
4851 switch (tunnel_type) {
4852 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
bbf33d1d 4853 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
7ae9dc35 4854 bp->vxlan_port = 0;
442a35a5 4855 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
c0c050c5
MC
4856 break;
4857 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
bbf33d1d 4858 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
7ae9dc35 4859 bp->nge_port = 0;
442a35a5 4860 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
c0c050c5
MC
4861 break;
4862 default:
4863 break;
4864 }
4865
bbf33d1d 4866 rc = hwrm_req_send(bp, req);
c0c050c5
MC
4867 if (rc)
4868 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4869 rc);
4870 return rc;
4871}
4872
4873static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4874 u8 tunnel_type)
4875{
bbf33d1d
EP
4876 struct hwrm_tunnel_dst_port_alloc_output *resp;
4877 struct hwrm_tunnel_dst_port_alloc_input *req;
4878 int rc;
c0c050c5 4879
bbf33d1d
EP
4880 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
4881 if (rc)
4882 return rc;
c0c050c5 4883
bbf33d1d
EP
4884 req->tunnel_type = tunnel_type;
4885 req->tunnel_dst_port_val = port;
c0c050c5 4886
bbf33d1d
EP
4887 resp = hwrm_req_hold(bp, req);
4888 rc = hwrm_req_send(bp, req);
c0c050c5
MC
4889 if (rc) {
4890 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4891 rc);
4892 goto err_out;
4893 }
4894
57aac71b
CJ
4895 switch (tunnel_type) {
4896 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
7ae9dc35 4897 bp->vxlan_port = port;
442a35a5
JK
4898 bp->vxlan_fw_dst_port_id =
4899 le16_to_cpu(resp->tunnel_dst_port_id);
57aac71b
CJ
4900 break;
4901 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
7ae9dc35 4902 bp->nge_port = port;
442a35a5 4903 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
57aac71b
CJ
4904 break;
4905 default:
4906 break;
4907 }
4908
c0c050c5 4909err_out:
bbf33d1d 4910 hwrm_req_drop(bp, req);
c0c050c5
MC
4911 return rc;
4912}
4913
4914static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4915{
bbf33d1d 4916 struct hwrm_cfa_l2_set_rx_mask_input *req;
c0c050c5 4917 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d 4918 int rc;
c0c050c5 4919
bbf33d1d
EP
4920 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
4921 if (rc)
4922 return rc;
c0c050c5 4923
bbf33d1d 4924 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
8cdb1592
PC
4925 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
4926 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4927 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4928 }
bbf33d1d
EP
4929 req->mask = cpu_to_le32(vnic->rx_mask);
4930 return hwrm_req_send_silent(bp, req);
c0c050c5
MC
4931}
4932
4933#ifdef CONFIG_RFS_ACCEL
4934static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4935 struct bnxt_ntuple_filter *fltr)
4936{
bbf33d1d
EP
4937 struct hwrm_cfa_ntuple_filter_free_input *req;
4938 int rc;
c0c050c5 4939
bbf33d1d
EP
4940 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
4941 if (rc)
4942 return rc;
4943
4944 req->ntuple_filter_id = fltr->filter_id;
4945 return hwrm_req_send(bp, req);
c0c050c5
MC
4946}
4947
4948#define BNXT_NTP_FLTR_FLAGS \
4949 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4950 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4951 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4952 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4953 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4954 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4955 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4956 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4957 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4958 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4959 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4960 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4961 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
c193554e 4962 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
c0c050c5 4963
61aad724
MC
4964#define BNXT_NTP_TUNNEL_FLTR_FLAG \
4965 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4966
c0c050c5
MC
4967static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4968 struct bnxt_ntuple_filter *fltr)
4969{
5c209fc8 4970 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
bbf33d1d 4971 struct hwrm_cfa_ntuple_filter_alloc_input *req;
c0c050c5 4972 struct flow_keys *keys = &fltr->fkeys;
ac33906c 4973 struct bnxt_vnic_info *vnic;
41136ab3 4974 u32 flags = 0;
bbf33d1d 4975 int rc;
c0c050c5 4976
bbf33d1d
EP
4977 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
4978 if (rc)
4979 return rc;
4980
4981 req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
c0c050c5 4982
41136ab3
MC
4983 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4984 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
bbf33d1d 4985 req->dst_id = cpu_to_le16(fltr->rxq);
ac33906c
MC
4986 } else {
4987 vnic = &bp->vnic_info[fltr->rxq + 1];
bbf33d1d 4988 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
ac33906c 4989 }
bbf33d1d
EP
4990 req->flags = cpu_to_le32(flags);
4991 req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
c0c050c5 4992
bbf33d1d
EP
4993 req->ethertype = htons(ETH_P_IP);
4994 memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4995 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4996 req->ip_protocol = keys->basic.ip_proto;
c0c050c5 4997
dda0e746
MC
4998 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4999 int i;
5000
bbf33d1d
EP
5001 req->ethertype = htons(ETH_P_IPV6);
5002 req->ip_addr_type =
dda0e746 5003 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
bbf33d1d 5004 *(struct in6_addr *)&req->src_ipaddr[0] =
dda0e746 5005 keys->addrs.v6addrs.src;
bbf33d1d 5006 *(struct in6_addr *)&req->dst_ipaddr[0] =
dda0e746
MC
5007 keys->addrs.v6addrs.dst;
5008 for (i = 0; i < 4; i++) {
bbf33d1d
EP
5009 req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
5010 req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
dda0e746
MC
5011 }
5012 } else {
bbf33d1d
EP
5013 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
5014 req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
5015 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
5016 req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
dda0e746 5017 }
61aad724 5018 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
bbf33d1d
EP
5019 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
5020 req->tunnel_type =
61aad724
MC
5021 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
5022 }
c0c050c5 5023
bbf33d1d
EP
5024 req->src_port = keys->ports.src;
5025 req->src_port_mask = cpu_to_be16(0xffff);
5026 req->dst_port = keys->ports.dst;
5027 req->dst_port_mask = cpu_to_be16(0xffff);
c0c050c5 5028
bbf33d1d
EP
5029 resp = hwrm_req_hold(bp, req);
5030 rc = hwrm_req_send(bp, req);
5031 if (!rc)
c0c050c5 5032 fltr->filter_id = resp->ntuple_filter_id;
bbf33d1d 5033 hwrm_req_drop(bp, req);
c0c050c5
MC
5034 return rc;
5035}
5036#endif
5037
5038static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
76660757 5039 const u8 *mac_addr)
c0c050c5 5040{
bbf33d1d
EP
5041 struct hwrm_cfa_l2_filter_alloc_output *resp;
5042 struct hwrm_cfa_l2_filter_alloc_input *req;
5043 int rc;
c0c050c5 5044
bbf33d1d
EP
5045 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
5046 if (rc)
5047 return rc;
5048
5049 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
dc52c6c7 5050 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
bbf33d1d 5051 req->flags |=
dc52c6c7 5052 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
bbf33d1d
EP
5053 req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
5054 req->enables =
c0c050c5 5055 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
c193554e 5056 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
c0c050c5 5057 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
bbf33d1d
EP
5058 memcpy(req->l2_addr, mac_addr, ETH_ALEN);
5059 req->l2_addr_mask[0] = 0xff;
5060 req->l2_addr_mask[1] = 0xff;
5061 req->l2_addr_mask[2] = 0xff;
5062 req->l2_addr_mask[3] = 0xff;
5063 req->l2_addr_mask[4] = 0xff;
5064 req->l2_addr_mask[5] = 0xff;
5065
5066 resp = hwrm_req_hold(bp, req);
5067 rc = hwrm_req_send(bp, req);
c0c050c5
MC
5068 if (!rc)
5069 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
5070 resp->l2_filter_id;
bbf33d1d 5071 hwrm_req_drop(bp, req);
c0c050c5
MC
5072 return rc;
5073}
5074
5075static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5076{
bbf33d1d 5077 struct hwrm_cfa_l2_filter_free_input *req;
c0c050c5 5078 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
bbf33d1d 5079 int rc;
c0c050c5
MC
5080
5081 /* Any associated ntuple filters will also be cleared by firmware. */
bbf33d1d
EP
5082 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
5083 if (rc)
5084 return rc;
5085 hwrm_req_hold(bp, req);
c0c050c5
MC
5086 for (i = 0; i < num_of_vnics; i++) {
5087 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5088
5089 for (j = 0; j < vnic->uc_filter_count; j++) {
bbf33d1d 5090 req->l2_filter_id = vnic->fw_l2_filter_id[j];
c0c050c5 5091
bbf33d1d 5092 rc = hwrm_req_send(bp, req);
c0c050c5
MC
5093 }
5094 vnic->uc_filter_count = 0;
5095 }
bbf33d1d 5096 hwrm_req_drop(bp, req);
c0c050c5
MC
5097 return rc;
5098}
5099
5100static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5101{
5102 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
79632e9b 5103 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
bbf33d1d
EP
5104 struct hwrm_vnic_tpa_cfg_input *req;
5105 int rc;
c0c050c5 5106
3c4fe80b
MC
5107 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5108 return 0;
5109
bbf33d1d
EP
5110 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
5111 if (rc)
5112 return rc;
c0c050c5
MC
5113
5114 if (tpa_flags) {
5115 u16 mss = bp->dev->mtu - 40;
5116 u32 nsegs, n, segs = 0, flags;
5117
5118 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5119 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5120 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5121 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5122 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5123 if (tpa_flags & BNXT_FLAG_GRO)
5124 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5125
bbf33d1d 5126 req->flags = cpu_to_le32(flags);
c0c050c5 5127
bbf33d1d 5128 req->enables =
c0c050c5 5129 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
c193554e
MC
5130 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5131 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
c0c050c5
MC
5132
5133 /* Number of segs are log2 units, and first packet is not
5134 * included as part of this units.
5135 */
2839f28b
MC
5136 if (mss <= BNXT_RX_PAGE_SIZE) {
5137 n = BNXT_RX_PAGE_SIZE / mss;
c0c050c5
MC
5138 nsegs = (MAX_SKB_FRAGS - 1) * n;
5139 } else {
2839f28b
MC
5140 n = mss / BNXT_RX_PAGE_SIZE;
5141 if (mss & (BNXT_RX_PAGE_SIZE - 1))
c0c050c5
MC
5142 n++;
5143 nsegs = (MAX_SKB_FRAGS - n) / n;
5144 }
5145
79632e9b
MC
5146 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5147 segs = MAX_TPA_SEGS_P5;
5148 max_aggs = bp->max_tpa;
5149 } else {
5150 segs = ilog2(nsegs);
5151 }
bbf33d1d
EP
5152 req->max_agg_segs = cpu_to_le16(segs);
5153 req->max_aggs = cpu_to_le16(max_aggs);
c193554e 5154
bbf33d1d 5155 req->min_agg_len = cpu_to_le32(512);
c0c050c5 5156 }
bbf33d1d 5157 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
c0c050c5 5158
bbf33d1d 5159 return hwrm_req_send(bp, req);
c0c050c5
MC
5160}
5161
2c61d211
MC
5162static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5163{
5164 struct bnxt_ring_grp_info *grp_info;
5165
5166 grp_info = &bp->grp_info[ring->grp_idx];
5167 return grp_info->cp_fw_ring_id;
5168}
5169
5170static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5171{
5172 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5173 struct bnxt_napi *bnapi = rxr->bnapi;
5174 struct bnxt_cp_ring_info *cpr;
5175
5176 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5177 return cpr->cp_ring_struct.fw_ring_id;
5178 } else {
5179 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5180 }
5181}
5182
5183static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5184{
5185 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5186 struct bnxt_napi *bnapi = txr->bnapi;
5187 struct bnxt_cp_ring_info *cpr;
5188
5189 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5190 return cpr->cp_ring_struct.fw_ring_id;
5191 } else {
5192 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5193 }
5194}
5195
1667cbf6
MC
5196static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5197{
5198 int entries;
5199
5200 if (bp->flags & BNXT_FLAG_CHIP_P5)
5201 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5202 else
5203 entries = HW_HASH_INDEX_SIZE;
5204
5205 bp->rss_indir_tbl_entries = entries;
5206 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5207 GFP_KERNEL);
5208 if (!bp->rss_indir_tbl)
5209 return -ENOMEM;
5210 return 0;
5211}
5212
5213static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5214{
5215 u16 max_rings, max_entries, pad, i;
5216
5217 if (!bp->rx_nr_rings)
5218 return;
5219
5220 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5221 max_rings = bp->rx_nr_rings - 1;
5222 else
5223 max_rings = bp->rx_nr_rings;
5224
5225 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5226
5227 for (i = 0; i < max_entries; i++)
5228 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5229
5230 pad = bp->rss_indir_tbl_entries - max_entries;
5231 if (pad)
5232 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5233}
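/* Example of the resulting default layout (illustrative only): with 4 RX
 * rings and no Nitro A0 adjustment, ethtool_rxfh_indir_default() spreads the
 * rings round-robin, so the table reads 0, 1, 2, 3, 0, 1, 2, 3, ... for
 * max_entries entries, with any padding entries zeroed.
 */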
5234
bd3191b5
MC
5235static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5236{
5237 u16 i, tbl_size, max_ring = 0;
5238
5239 if (!bp->rss_indir_tbl)
5240 return 0;
5241
5242 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5243 for (i = 0; i < tbl_size; i++)
5244 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5245 return max_ring;
5246}
5247
f9f6a3fb
MC
5248int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5249{
5250 if (bp->flags & BNXT_FLAG_CHIP_P5)
5251 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5252 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5253 return 2;
5254 return 1;
5255}
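/* Illustrative note (editorial): on P5 chips each RSS context covers
 * BNXT_RSS_TABLE_ENTRIES_P5 RX rings; assuming its usual value of 64,
 * 65 RX rings would need DIV_ROUND_UP(65, 64) = 2 contexts, while older
 * chips use 1 context (or 2 on Nitro A0).
 */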
5256
41d2dd42 5257static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
f33a305d
MC
5258{
5259 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5260 u16 i, j;
5261
5262 /* Fill the RSS indirection table with ring group ids */
5263 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5264 if (!no_rss)
5265 j = bp->rss_indir_tbl[i];
5266 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5267 }
5268}
5269
41d2dd42
EP
5270static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5271 struct bnxt_vnic_info *vnic)
f33a305d
MC
5272{
5273 __le16 *ring_tbl = vnic->rss_table;
5274 struct bnxt_rx_ring_info *rxr;
5275 u16 tbl_size, i;
5276
5277 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5278
5279 for (i = 0; i < tbl_size; i++) {
5280 u16 ring_id, j;
5281
5282 j = bp->rss_indir_tbl[i];
5283 rxr = &bp->rx_ring[j];
5284
5285 ring_id = rxr->rx_ring_struct.fw_ring_id;
5286 *ring_tbl++ = cpu_to_le16(ring_id);
5287 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5288 *ring_tbl++ = cpu_to_le16(ring_id);
5289 }
5290}
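/* Note (added for clarity): on P5 each indirection entry expands to a pair of
 * ring IDs, the RX ring's fw_ring_id followed by its matching completion ring
 * ID, so the hardware table holds twice as many 16-bit words as logical
 * entries.
 */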
5291
41d2dd42
EP
5292static void
5293__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
5294 struct bnxt_vnic_info *vnic)
f33a305d
MC
5295{
5296 if (bp->flags & BNXT_FLAG_CHIP_P5)
41d2dd42 5297 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
f33a305d 5298 else
41d2dd42
EP
5299 bnxt_fill_hw_rss_tbl(bp, vnic);
5300
98a4322b
EP
5301 if (bp->rss_hash_delta) {
5302 req->hash_type = cpu_to_le32(bp->rss_hash_delta);
5303 if (bp->rss_hash_cfg & bp->rss_hash_delta)
5304 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
5305 else
5306 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
5307 } else {
5308 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5309 }
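	/* Clarifying note (editorial): when a hash delta is pending, only the
	 * changed hash-type bits are sent; they are flagged INCLUDE when they
	 * overlap the current rss_hash_cfg and EXCLUDE otherwise, rather than
	 * re-sending the full rss_hash_cfg bitmap.
	 */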
41d2dd42
EP
5310 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5311 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5312 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
f33a305d
MC
5313}
5314
c0c050c5
MC
5315static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5316{
c0c050c5 5317 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d
EP
5318 struct hwrm_vnic_rss_cfg_input *req;
5319 int rc;
c0c050c5 5320
7b3af4f7
MC
5321 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5322 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
c0c050c5
MC
5323 return 0;
5324
bbf33d1d
EP
5325 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5326 if (rc)
5327 return rc;
5328
41d2dd42
EP
5329 if (set_rss)
5330 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
bbf33d1d
EP
5331 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5332 return hwrm_req_send(bp, req);
c0c050c5
MC
5333}
5334
7b3af4f7
MC
5335static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5336{
5337 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d 5338 struct hwrm_vnic_rss_cfg_input *req;
f33a305d
MC
5339 dma_addr_t ring_tbl_map;
5340 u32 i, nr_ctxs;
bbf33d1d
EP
5341 int rc;
5342
5343 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5344 if (rc)
5345 return rc;
5346
5347 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5348 if (!set_rss)
5349 return hwrm_req_send(bp, req);
7b3af4f7 5350
41d2dd42 5351 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
f33a305d 5352 ring_tbl_map = vnic->rss_table_dma_addr;
f9f6a3fb 5353 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
7b3af4f7 5354
bbf33d1d
EP
5355 hwrm_req_hold(bp, req);
5356 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5357 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5358 req->ring_table_pair_index = i;
5359 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5360 rc = hwrm_req_send(bp, req);
7b3af4f7 5361 if (rc)
bbf33d1d 5362 goto exit;
7b3af4f7 5363 }
bbf33d1d
EP
5364
5365exit:
5366 hwrm_req_drop(bp, req);
5367 return rc;
7b3af4f7
MC
5368}
5369
98a4322b
EP
5370static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
5371{
5372 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5373 struct hwrm_vnic_rss_qcfg_output *resp;
5374 struct hwrm_vnic_rss_qcfg_input *req;
5375
5376 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
5377 return;
5378
 5379	/* all contexts are configured to the same hash_type; context zero always exists */
5380 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5381 resp = hwrm_req_hold(bp, req);
5382 if (!hwrm_req_send(bp, req)) {
5383 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
5384 bp->rss_hash_delta = 0;
5385 }
5386 hwrm_req_drop(bp, req);
5387}
5388
c0c050c5
MC
5389static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5390{
5391 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d
EP
5392 struct hwrm_vnic_plcmodes_cfg_input *req;
5393 int rc;
5394
5395 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
5396 if (rc)
5397 return rc;
c0c050c5 5398
32861236
AG
5399 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
5400 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
5401
a056ebcc
MC
5402 if (BNXT_RX_PAGE_MODE(bp)) {
5403 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
5404 } else {
32861236
AG
5405 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5406 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5407 req->enables |=
5408 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
a056ebcc
MC
5409 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5410 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
32861236 5411 }
bbf33d1d
EP
5412 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5413 return hwrm_req_send(bp, req);
c0c050c5
MC
5414}
5415
94ce9caa
PS
5416static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5417 u16 ctx_idx)
c0c050c5 5418{
bbf33d1d 5419 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
c0c050c5 5420
bbf33d1d
EP
5421 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
5422 return;
5423
5424 req->rss_cos_lb_ctx_id =
94ce9caa 5425 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
c0c050c5 5426
bbf33d1d 5427 hwrm_req_send(bp, req);
94ce9caa 5428 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
c0c050c5
MC
5429}
5430
5431static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5432{
94ce9caa 5433 int i, j;
c0c050c5
MC
5434
5435 for (i = 0; i < bp->nr_vnics; i++) {
5436 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5437
94ce9caa
PS
5438 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5439 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5440 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5441 }
c0c050c5
MC
5442 }
5443 bp->rsscos_nr_ctxs = 0;
5444}
5445
94ce9caa 5446static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
c0c050c5 5447{
bbf33d1d
EP
5448 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
5449 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
c0c050c5 5450 int rc;
c0c050c5 5451
bbf33d1d
EP
5452 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
5453 if (rc)
5454 return rc;
c0c050c5 5455
bbf33d1d
EP
5456 resp = hwrm_req_hold(bp, req);
5457 rc = hwrm_req_send(bp, req);
c0c050c5 5458 if (!rc)
94ce9caa 5459 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
c0c050c5 5460 le16_to_cpu(resp->rss_cos_lb_ctx_id);
bbf33d1d 5461 hwrm_req_drop(bp, req);
c0c050c5
MC
5462
5463 return rc;
5464}
5465
abe93ad2
MC
5466static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5467{
5468 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5469 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5470 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5471}
5472
a588e458 5473int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
c0c050c5 5474{
c0c050c5 5475 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d
EP
5476 struct hwrm_vnic_cfg_input *req;
5477 unsigned int ring = 0, grp_idx;
cf6645f8 5478 u16 def_vlan = 0;
bbf33d1d 5479 int rc;
c0c050c5 5480
bbf33d1d
EP
5481 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
5482 if (rc)
5483 return rc;
dc52c6c7 5484
7b3af4f7
MC
5485 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5486 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5487
bbf33d1d 5488 req->default_rx_ring_id =
7b3af4f7 5489 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
bbf33d1d 5490 req->default_cmpl_ring_id =
7b3af4f7 5491 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
bbf33d1d 5492 req->enables =
7b3af4f7
MC
5493 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5494 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5495 goto vnic_mru;
5496 }
bbf33d1d 5497 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
c0c050c5 5498	/* Only RSS is supported for now; TBD: COS & LB */
dc52c6c7 5499 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
bbf33d1d
EP
5500 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5501 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
dc52c6c7 5502 VNIC_CFG_REQ_ENABLES_MRU);
ae10ae74 5503 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
bbf33d1d 5504 req->rss_rule =
ae10ae74 5505 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
bbf33d1d 5506 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
ae10ae74 5507 VNIC_CFG_REQ_ENABLES_MRU);
bbf33d1d 5508 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
dc52c6c7 5509 } else {
bbf33d1d 5510 req->rss_rule = cpu_to_le16(0xffff);
dc52c6c7 5511 }
94ce9caa 5512
dc52c6c7
PS
5513 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5514 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
bbf33d1d
EP
5515 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5516 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
94ce9caa 5517 } else {
bbf33d1d 5518 req->cos_rule = cpu_to_le16(0xffff);
94ce9caa
PS
5519 }
5520
c0c050c5 5521 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
b81a90d3 5522 ring = 0;
c0c050c5 5523 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
b81a90d3 5524 ring = vnic_id - 1;
76595193
PS
5525 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5526 ring = bp->rx_nr_rings - 1;
c0c050c5 5527
b81a90d3 5528 grp_idx = bp->rx_ring[ring].bnapi->index;
bbf33d1d
EP
5529 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5530 req->lb_rule = cpu_to_le16(0xffff);
7b3af4f7 5531vnic_mru:
bbf33d1d 5532 req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
c0c050c5 5533
bbf33d1d 5534 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
cf6645f8
MC
5535#ifdef CONFIG_BNXT_SRIOV
5536 if (BNXT_VF(bp))
5537 def_vlan = bp->vf.vlan;
5538#endif
5539 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
bbf33d1d 5540 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
a588e458 5541 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
bbf33d1d 5542 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
c0c050c5 5543
bbf33d1d 5544 return hwrm_req_send(bp, req);
c0c050c5
MC
5545}
5546
3d061591 5547static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
c0c050c5 5548{
c0c050c5 5549 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
bbf33d1d 5550 struct hwrm_vnic_free_input *req;
c0c050c5 5551
bbf33d1d
EP
5552 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
5553 return;
5554
5555 req->vnic_id =
c0c050c5
MC
5556 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5557
bbf33d1d 5558 hwrm_req_send(bp, req);
c0c050c5
MC
5559 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5560 }
c0c050c5
MC
5561}
5562
5563static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5564{
5565 u16 i;
5566
5567 for (i = 0; i < bp->nr_vnics; i++)
5568 bnxt_hwrm_vnic_free_one(bp, i);
5569}
5570
b81a90d3
MC
5571static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5572 unsigned int start_rx_ring_idx,
5573 unsigned int nr_rings)
c0c050c5 5574{
b81a90d3 5575 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
44c6f72a 5576 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d
EP
5577 struct hwrm_vnic_alloc_output *resp;
5578 struct hwrm_vnic_alloc_input *req;
5579 int rc;
5580
5581 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
5582 if (rc)
5583 return rc;
44c6f72a
MC
5584
5585 if (bp->flags & BNXT_FLAG_CHIP_P5)
5586 goto vnic_no_ring_grps;
c0c050c5
MC
5587
5588 /* map ring groups to this vnic */
b81a90d3
MC
5589 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5590 grp_idx = bp->rx_ring[i].bnapi->index;
5591 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
c0c050c5 5592 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
b81a90d3 5593 j, nr_rings);
c0c050c5
MC
5594 break;
5595 }
44c6f72a 5596 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
c0c050c5
MC
5597 }
5598
44c6f72a
MC
5599vnic_no_ring_grps:
5600 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5601 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
c0c050c5 5602 if (vnic_id == 0)
bbf33d1d 5603 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
c0c050c5 5604
bbf33d1d
EP
5605 resp = hwrm_req_hold(bp, req);
5606 rc = hwrm_req_send(bp, req);
c0c050c5 5607 if (!rc)
44c6f72a 5608 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
bbf33d1d 5609 hwrm_req_drop(bp, req);
c0c050c5
MC
5610 return rc;
5611}
5612
8fdefd63
MC
5613static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5614{
bbf33d1d
EP
5615 struct hwrm_vnic_qcaps_output *resp;
5616 struct hwrm_vnic_qcaps_input *req;
8fdefd63
MC
5617 int rc;
5618
fbbdbc64 5619 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
ba642ab7 5620 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
8fdefd63
MC
5621 if (bp->hwrm_spec_code < 0x10600)
5622 return 0;
5623
bbf33d1d
EP
5624 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
5625 if (rc)
5626 return rc;
5627
5628 resp = hwrm_req_hold(bp, req);
5629 rc = hwrm_req_send(bp, req);
8fdefd63 5630 if (!rc) {
abe93ad2
MC
5631 u32 flags = le32_to_cpu(resp->flags);
5632
41e8d798
MC
5633 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5634 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
8fdefd63 5635 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
abe93ad2
MC
5636 if (flags &
5637 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5638 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
1da63ddd
EP
5639
5640 /* Older P5 fw before EXT_HW_STATS support did not set
5641 * VLAN_STRIP_CAP properly.
5642 */
5643 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
9d6b648c 5644 (BNXT_CHIP_P5_THOR(bp) &&
1da63ddd
EP
5645 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5646 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
98a4322b
EP
5647 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
5648 bp->fw_cap |= BNXT_FW_CAP_RSS_HASH_TYPE_DELTA;
79632e9b 5649 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
9d6b648c
MC
5650 if (bp->max_tpa_v2) {
5651 if (BNXT_CHIP_P5_THOR(bp))
5652 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5653 else
5654 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5655 }
8fdefd63 5656 }
bbf33d1d 5657 hwrm_req_drop(bp, req);
8fdefd63
MC
5658 return rc;
5659}
5660
c0c050c5
MC
5661static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5662{
bbf33d1d
EP
5663 struct hwrm_ring_grp_alloc_output *resp;
5664 struct hwrm_ring_grp_alloc_input *req;
5665 int rc;
c0c050c5 5666 u16 i;
c0c050c5 5667
44c6f72a
MC
5668 if (bp->flags & BNXT_FLAG_CHIP_P5)
5669 return 0;
5670
bbf33d1d
EP
5671 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
5672 if (rc)
5673 return rc;
5674
5675 resp = hwrm_req_hold(bp, req);
c0c050c5 5676 for (i = 0; i < bp->rx_nr_rings; i++) {
b81a90d3 5677 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
c0c050c5 5678
bbf33d1d
EP
5679 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5680 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5681 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5682 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
c0c050c5 5683
bbf33d1d 5684 rc = hwrm_req_send(bp, req);
c0c050c5 5685
c0c050c5
MC
5686 if (rc)
5687 break;
5688
b81a90d3
MC
5689 bp->grp_info[grp_idx].fw_grp_id =
5690 le32_to_cpu(resp->ring_group_id);
c0c050c5 5691 }
bbf33d1d 5692 hwrm_req_drop(bp, req);
c0c050c5
MC
5693 return rc;
5694}
5695
3d061591 5696static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
c0c050c5 5697{
bbf33d1d 5698 struct hwrm_ring_grp_free_input *req;
c0c050c5 5699 u16 i;
c0c050c5 5700
44c6f72a 5701 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
3d061591 5702 return;
c0c050c5 5703
bbf33d1d
EP
5704 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
5705 return;
c0c050c5 5706
bbf33d1d 5707 hwrm_req_hold(bp, req);
c0c050c5
MC
5708 for (i = 0; i < bp->cp_nr_rings; i++) {
5709 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5710 continue;
bbf33d1d 5711 req->ring_group_id =
c0c050c5
MC
5712 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5713
bbf33d1d 5714 hwrm_req_send(bp, req);
c0c050c5
MC
5715 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5716 }
bbf33d1d 5717 hwrm_req_drop(bp, req);
c0c050c5
MC
5718}
5719
5720static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5721 struct bnxt_ring_struct *ring,
9899bb59 5722 u32 ring_type, u32 map_index)
c0c050c5 5723{
bbf33d1d
EP
5724 struct hwrm_ring_alloc_output *resp;
5725 struct hwrm_ring_alloc_input *req;
6fe19886 5726 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
9899bb59 5727 struct bnxt_ring_grp_info *grp_info;
bbf33d1d 5728 int rc, err = 0;
c0c050c5
MC
5729 u16 ring_id;
5730
bbf33d1d
EP
5731 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
5732 if (rc)
5733 goto exit;
c0c050c5 5734
bbf33d1d 5735 req->enables = 0;
6fe19886 5736 if (rmem->nr_pages > 1) {
bbf33d1d 5737 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
c0c050c5 5738 /* Page size is in log2 units */
bbf33d1d
EP
5739 req->page_size = BNXT_PAGE_SHIFT;
5740 req->page_tbl_depth = 1;
c0c050c5 5741 } else {
bbf33d1d 5742 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
c0c050c5 5743 }
bbf33d1d 5744 req->fbo = 0;
c0c050c5 5745 /* Association of ring index with doorbell index and MSIX number */
bbf33d1d 5746 req->logical_id = cpu_to_le16(map_index);
c0c050c5
MC
5747
5748 switch (ring_type) {
2c61d211
MC
5749 case HWRM_RING_ALLOC_TX: {
5750 struct bnxt_tx_ring_info *txr;
5751
5752 txr = container_of(ring, struct bnxt_tx_ring_info,
5753 tx_ring_struct);
bbf33d1d 5754 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
c0c050c5 5755 /* Association of transmit ring with completion ring */
9899bb59 5756 grp_info = &bp->grp_info[ring->grp_idx];
bbf33d1d
EP
5757 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5758 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
5759 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5760 req->queue_id = cpu_to_le16(ring->queue_id);
c0c050c5 5761 break;
2c61d211 5762 }
c0c050c5 5763 case HWRM_RING_ALLOC_RX:
bbf33d1d
EP
5764 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5765 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
23aefdd7
MC
5766 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5767 u16 flags = 0;
5768
5769 /* Association of rx ring with stats context */
5770 grp_info = &bp->grp_info[ring->grp_idx];
bbf33d1d
EP
5771 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5772 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5773 req->enables |= cpu_to_le32(
23aefdd7
MC
5774 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5775 if (NET_IP_ALIGN == 2)
5776 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
bbf33d1d 5777 req->flags = cpu_to_le16(flags);
23aefdd7 5778 }
c0c050c5
MC
5779 break;
5780 case HWRM_RING_ALLOC_AGG:
23aefdd7 5781 if (bp->flags & BNXT_FLAG_CHIP_P5) {
bbf33d1d 5782 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
23aefdd7
MC
5783 /* Association of agg ring with rx ring */
5784 grp_info = &bp->grp_info[ring->grp_idx];
bbf33d1d
EP
5785 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5786 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5787 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5788 req->enables |= cpu_to_le32(
23aefdd7
MC
5789 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5790 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5791 } else {
bbf33d1d 5792 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
23aefdd7 5793 }
bbf33d1d 5794 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
c0c050c5
MC
5795 break;
5796 case HWRM_RING_ALLOC_CMPL:
bbf33d1d
EP
5797 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5798 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
23aefdd7
MC
5799 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5800 /* Association of cp ring with nq */
5801 grp_info = &bp->grp_info[map_index];
bbf33d1d
EP
5802 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5803 req->cq_handle = cpu_to_le64(ring->handle);
5804 req->enables |= cpu_to_le32(
23aefdd7
MC
5805 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5806 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
bbf33d1d 5807 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
23aefdd7
MC
5808 }
5809 break;
5810 case HWRM_RING_ALLOC_NQ:
bbf33d1d
EP
5811 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5812 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
c0c050c5 5813 if (bp->flags & BNXT_FLAG_USING_MSIX)
bbf33d1d 5814 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
c0c050c5
MC
5815 break;
5816 default:
5817 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5818 ring_type);
5819 return -1;
5820 }
5821
bbf33d1d
EP
5822 resp = hwrm_req_hold(bp, req);
5823 rc = hwrm_req_send(bp, req);
c0c050c5
MC
5824 err = le16_to_cpu(resp->error_code);
5825 ring_id = le16_to_cpu(resp->ring_id);
bbf33d1d 5826 hwrm_req_drop(bp, req);
c0c050c5 5827
bbf33d1d 5828exit:
c0c050c5 5829 if (rc || err) {
2727c888
MC
5830 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5831 ring_type, rc, err);
5832 return -EIO;
c0c050c5
MC
5833 }
5834 ring->fw_ring_id = ring_id;
5835 return rc;
5836}
5837
486b5c22
MC
5838static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5839{
5840 int rc;
5841
5842 if (BNXT_PF(bp)) {
bbf33d1d 5843 struct hwrm_func_cfg_input *req;
486b5c22 5844
bbf33d1d
EP
5845 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
5846 if (rc)
5847 return rc;
5848
5849 req->fid = cpu_to_le16(0xffff);
5850 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5851 req->async_event_cr = cpu_to_le16(idx);
5852 return hwrm_req_send(bp, req);
486b5c22 5853 } else {
bbf33d1d
EP
5854 struct hwrm_func_vf_cfg_input *req;
5855
5856 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
5857 if (rc)
5858 return rc;
486b5c22 5859
bbf33d1d 5860 req->enables =
486b5c22 5861 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
bbf33d1d
EP
5862 req->async_event_cr = cpu_to_le16(idx);
5863 return hwrm_req_send(bp, req);
486b5c22 5864 }
486b5c22
MC
5865}
5866
697197e5
MC
5867static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5868 u32 map_idx, u32 xid)
5869{
5870 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5871 if (BNXT_PF(bp))
ebdf73dc 5872 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
697197e5 5873 else
ebdf73dc 5874 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
697197e5
MC
5875 switch (ring_type) {
5876 case HWRM_RING_ALLOC_TX:
5877 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5878 break;
5879 case HWRM_RING_ALLOC_RX:
5880 case HWRM_RING_ALLOC_AGG:
5881 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5882 break;
5883 case HWRM_RING_ALLOC_CMPL:
5884 db->db_key64 = DBR_PATH_L2;
5885 break;
5886 case HWRM_RING_ALLOC_NQ:
5887 db->db_key64 = DBR_PATH_L2;
5888 break;
5889 }
5890 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5891 } else {
5892 db->doorbell = bp->bar1 + map_idx * 0x80;
5893 switch (ring_type) {
5894 case HWRM_RING_ALLOC_TX:
5895 db->db_key32 = DB_KEY_TX;
5896 break;
5897 case HWRM_RING_ALLOC_RX:
5898 case HWRM_RING_ALLOC_AGG:
5899 db->db_key32 = DB_KEY_RX;
5900 break;
5901 case HWRM_RING_ALLOC_CMPL:
5902 db->db_key32 = DB_KEY_CP;
5903 break;
5904 }
5905 }
5906}
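/* Worked example (illustrative, editorial addition): for a P5 PF TX ring
 * whose firmware ring ID (xid) is 5, this function yields
 * doorbell = bar1 + DB_PF_OFFSET_P5 and
 * db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ | ((u64)5 << DBR_XID_SFT).
 */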
5907
c0c050c5
MC
5908static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5909{
e8f267b0 5910 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
c0c050c5 5911 int i, rc = 0;
697197e5 5912 u32 type;
c0c050c5 5913
23aefdd7
MC
5914 if (bp->flags & BNXT_FLAG_CHIP_P5)
5915 type = HWRM_RING_ALLOC_NQ;
5916 else
5917 type = HWRM_RING_ALLOC_CMPL;
edd0c2cc
MC
5918 for (i = 0; i < bp->cp_nr_rings; i++) {
5919 struct bnxt_napi *bnapi = bp->bnapi[i];
5920 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5921 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9899bb59 5922 u32 map_idx = ring->map_idx;
5e66e35a 5923 unsigned int vector;
c0c050c5 5924
5e66e35a
MC
5925 vector = bp->irq_tbl[map_idx].vector;
5926 disable_irq_nosync(vector);
697197e5 5927 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5e66e35a
MC
5928 if (rc) {
5929 enable_irq(vector);
edd0c2cc 5930 goto err_out;
5e66e35a 5931 }
697197e5
MC
5932 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5933 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5e66e35a 5934 enable_irq(vector);
edd0c2cc 5935 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
486b5c22
MC
5936
5937 if (!i) {
5938 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5939 if (rc)
5940 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5941 }
c0c050c5
MC
5942 }
5943
697197e5 5944 type = HWRM_RING_ALLOC_TX;
edd0c2cc 5945 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5946 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3e08b184
MC
5947 struct bnxt_ring_struct *ring;
5948 u32 map_idx;
c0c050c5 5949
3e08b184
MC
5950 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5951 struct bnxt_napi *bnapi = txr->bnapi;
5952 struct bnxt_cp_ring_info *cpr, *cpr2;
5953 u32 type2 = HWRM_RING_ALLOC_CMPL;
5954
5955 cpr = &bnapi->cp_ring;
5956 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5957 ring = &cpr2->cp_ring_struct;
5958 ring->handle = BNXT_TX_HDL;
5959 map_idx = bnapi->index;
5960 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5961 if (rc)
5962 goto err_out;
5963 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5964 ring->fw_ring_id);
5965 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5966 }
5967 ring = &txr->tx_ring_struct;
5968 map_idx = i;
697197e5 5969 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5970 if (rc)
5971 goto err_out;
697197e5 5972 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
c0c050c5
MC
5973 }
5974
697197e5 5975 type = HWRM_RING_ALLOC_RX;
edd0c2cc 5976 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5977 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5978 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3e08b184
MC
5979 struct bnxt_napi *bnapi = rxr->bnapi;
5980 u32 map_idx = bnapi->index;
c0c050c5 5981
697197e5 5982 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5983 if (rc)
5984 goto err_out;
697197e5 5985 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
e8f267b0
MC
5986 /* If we have agg rings, post agg buffers first. */
5987 if (!agg_rings)
5988 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
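		/* Editorial note: when aggregation rings are in use, this RX
		 * producer doorbell is deferred; it is written in the agg ring
		 * loop below, after the agg ring doorbell, so the aggregation
		 * buffers are visible to hardware first.
		 */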
b81a90d3 5989 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
3e08b184
MC
5990 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5991 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5992 u32 type2 = HWRM_RING_ALLOC_CMPL;
5993 struct bnxt_cp_ring_info *cpr2;
5994
5995 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5996 ring = &cpr2->cp_ring_struct;
5997 ring->handle = BNXT_RX_HDL;
5998 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5999 if (rc)
6000 goto err_out;
6001 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
6002 ring->fw_ring_id);
6003 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
6004 }
c0c050c5
MC
6005 }
6006
e8f267b0 6007 if (agg_rings) {
697197e5 6008 type = HWRM_RING_ALLOC_AGG;
c0c050c5 6009 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 6010 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
6011 struct bnxt_ring_struct *ring =
6012 &rxr->rx_agg_ring_struct;
9899bb59 6013 u32 grp_idx = ring->grp_idx;
b81a90d3 6014 u32 map_idx = grp_idx + bp->rx_nr_rings;
c0c050c5 6015
697197e5 6016 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
c0c050c5
MC
6017 if (rc)
6018 goto err_out;
6019
697197e5
MC
6020 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
6021 ring->fw_ring_id);
6022 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
e8f267b0 6023 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
b81a90d3 6024 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
c0c050c5
MC
6025 }
6026 }
6027err_out:
6028 return rc;
6029}
6030
6031static int hwrm_ring_free_send_msg(struct bnxt *bp,
6032 struct bnxt_ring_struct *ring,
6033 u32 ring_type, int cmpl_ring_id)
6034{
bbf33d1d
EP
6035 struct hwrm_ring_free_output *resp;
6036 struct hwrm_ring_free_input *req;
6037 u16 error_code = 0;
c0c050c5 6038 int rc;
c0c050c5 6039
b340dc68 6040 if (BNXT_NO_FW_ACCESS(bp))
b4fff207
MC
6041 return 0;
6042
bbf33d1d
EP
6043 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
6044 if (rc)
6045 goto exit;
c0c050c5 6046
bbf33d1d
EP
6047 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
6048 req->ring_type = ring_type;
6049 req->ring_id = cpu_to_le16(ring->fw_ring_id);
c0c050c5 6050
bbf33d1d
EP
6051 resp = hwrm_req_hold(bp, req);
6052 rc = hwrm_req_send(bp, req);
6053 error_code = le16_to_cpu(resp->error_code);
6054 hwrm_req_drop(bp, req);
6055exit:
c0c050c5 6056 if (rc || error_code) {
2727c888
MC
6057 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
6058 ring_type, rc, error_code);
6059 return -EIO;
c0c050c5
MC
6060 }
6061 return 0;
6062}
6063
edd0c2cc 6064static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
c0c050c5 6065{
23aefdd7 6066 u32 type;
edd0c2cc 6067 int i;
c0c050c5
MC
6068
6069 if (!bp->bnapi)
edd0c2cc 6070 return;
c0c050c5 6071
edd0c2cc 6072 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 6073 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
edd0c2cc 6074 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
edd0c2cc
MC
6075
6076 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
6077 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
6078
edd0c2cc
MC
6079 hwrm_ring_free_send_msg(bp, ring,
6080 RING_FREE_REQ_RING_TYPE_TX,
6081 close_path ? cmpl_ring_id :
6082 INVALID_HW_RING_ID);
6083 ring->fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
6084 }
6085 }
6086
edd0c2cc 6087 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 6088 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 6089 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
b81a90d3 6090 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
6091
6092 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
6093 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6094
edd0c2cc
MC
6095 hwrm_ring_free_send_msg(bp, ring,
6096 RING_FREE_REQ_RING_TYPE_RX,
6097 close_path ? cmpl_ring_id :
6098 INVALID_HW_RING_ID);
6099 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
6100 bp->grp_info[grp_idx].rx_fw_ring_id =
6101 INVALID_HW_RING_ID;
c0c050c5
MC
6102 }
6103 }
6104
23aefdd7
MC
6105 if (bp->flags & BNXT_FLAG_CHIP_P5)
6106 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
6107 else
6108 type = RING_FREE_REQ_RING_TYPE_RX;
edd0c2cc 6109 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 6110 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 6111 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
b81a90d3 6112 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
6113
6114 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
6115 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6116
23aefdd7 6117 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
6118 close_path ? cmpl_ring_id :
6119 INVALID_HW_RING_ID);
6120 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
6121 bp->grp_info[grp_idx].agg_fw_ring_id =
6122 INVALID_HW_RING_ID;
c0c050c5
MC
6123 }
6124 }
6125
9d8bc097
MC
 6126	/* The completion rings are about to be freed. After that, the
 6127	 * IRQ doorbell will no longer work, so we need to disable the
 6128	 * IRQ here.
 6129	 */
6130 bnxt_disable_int_sync(bp);
6131
23aefdd7
MC
6132 if (bp->flags & BNXT_FLAG_CHIP_P5)
6133 type = RING_FREE_REQ_RING_TYPE_NQ;
6134 else
6135 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
edd0c2cc
MC
6136 for (i = 0; i < bp->cp_nr_rings; i++) {
6137 struct bnxt_napi *bnapi = bp->bnapi[i];
6138 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3e08b184
MC
6139 struct bnxt_ring_struct *ring;
6140 int j;
edd0c2cc 6141
3e08b184
MC
6142 for (j = 0; j < 2; j++) {
6143 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
6144
6145 if (cpr2) {
6146 ring = &cpr2->cp_ring_struct;
6147 if (ring->fw_ring_id == INVALID_HW_RING_ID)
6148 continue;
6149 hwrm_ring_free_send_msg(bp, ring,
6150 RING_FREE_REQ_RING_TYPE_L2_CMPL,
6151 INVALID_HW_RING_ID);
6152 ring->fw_ring_id = INVALID_HW_RING_ID;
6153 }
6154 }
6155 ring = &cpr->cp_ring_struct;
edd0c2cc 6156 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
23aefdd7 6157 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
6158 INVALID_HW_RING_ID);
6159 ring->fw_ring_id = INVALID_HW_RING_ID;
6160 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
6161 }
6162 }
c0c050c5
MC
6163}
6164
41e8d798
MC
6165static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6166 bool shared);
6167
674f50a5
MC
6168static int bnxt_hwrm_get_rings(struct bnxt *bp)
6169{
674f50a5 6170 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
bbf33d1d
EP
6171 struct hwrm_func_qcfg_output *resp;
6172 struct hwrm_func_qcfg_input *req;
674f50a5
MC
6173 int rc;
6174
6175 if (bp->hwrm_spec_code < 0x10601)
6176 return 0;
6177
bbf33d1d
EP
6178 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6179 if (rc)
6180 return rc;
6181
6182 req->fid = cpu_to_le16(0xffff);
6183 resp = hwrm_req_hold(bp, req);
6184 rc = hwrm_req_send(bp, req);
674f50a5 6185 if (rc) {
bbf33d1d 6186 hwrm_req_drop(bp, req);
d4f1420d 6187 return rc;
674f50a5
MC
6188 }
6189
6190 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
f1ca94de 6191 if (BNXT_NEW_RM(bp)) {
674f50a5
MC
6192 u16 cp, stats;
6193
6194 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6195 hw_resc->resv_hw_ring_grps =
6196 le32_to_cpu(resp->alloc_hw_ring_grps);
6197 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6198 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6199 stats = le16_to_cpu(resp->alloc_stat_ctx);
75720e63 6200 hw_resc->resv_irqs = cp;
41e8d798
MC
6201 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6202 int rx = hw_resc->resv_rx_rings;
6203 int tx = hw_resc->resv_tx_rings;
6204
6205 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6206 rx >>= 1;
6207 if (cp < (rx + tx)) {
6208 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6209 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6210 rx <<= 1;
6211 hw_resc->resv_rx_rings = rx;
6212 hw_resc->resv_tx_rings = tx;
6213 }
75720e63 6214 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
41e8d798
MC
6215 hw_resc->resv_hw_ring_grps = rx;
6216 }
674f50a5 6217 hw_resc->resv_cp_rings = cp;
780baad4 6218 hw_resc->resv_stat_ctxs = stats;
674f50a5 6219 }
bbf33d1d 6220 hwrm_req_drop(bp, req);
674f50a5
MC
6221 return 0;
6222}
6223
391be5c2
MC
6224int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6225{
bbf33d1d
EP
6226 struct hwrm_func_qcfg_output *resp;
6227 struct hwrm_func_qcfg_input *req;
391be5c2
MC
6228 int rc;
6229
6230 if (bp->hwrm_spec_code < 0x10601)
6231 return 0;
6232
bbf33d1d
EP
6233 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6234 if (rc)
6235 return rc;
6236
6237 req->fid = cpu_to_le16(fid);
6238 resp = hwrm_req_hold(bp, req);
6239 rc = hwrm_req_send(bp, req);
391be5c2
MC
6240 if (!rc)
6241 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6242
bbf33d1d 6243 hwrm_req_drop(bp, req);
391be5c2
MC
6244 return rc;
6245}
6246
41e8d798
MC
6247static bool bnxt_rfs_supported(struct bnxt *bp);
6248
bbf33d1d
EP
6249static struct hwrm_func_cfg_input *
6250__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6251 int ring_grps, int cp_rings, int stats, int vnics)
391be5c2 6252{
bbf33d1d 6253 struct hwrm_func_cfg_input *req;
674f50a5 6254 u32 enables = 0;
391be5c2 6255
bbf33d1d
EP
6256 if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
6257 return NULL;
6258
4ed50ef4 6259 req->fid = cpu_to_le16(0xffff);
674f50a5 6260 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4ed50ef4 6261 req->num_tx_rings = cpu_to_le16(tx_rings);
f1ca94de 6262 if (BNXT_NEW_RM(bp)) {
674f50a5 6263 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
3f93cd3f 6264 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
6265 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6266 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6267 enables |= tx_rings + ring_grps ?
3f93cd3f 6268 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6269 enables |= rx_rings ?
6270 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6271 } else {
6272 enables |= cp_rings ?
3f93cd3f 6273 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6274 enables |= ring_grps ?
6275 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6276 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6277 }
dbe80d44 6278 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
674f50a5 6279
4ed50ef4 6280 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
6281 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6282 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6283 req->num_msix = cpu_to_le16(cp_rings);
6284 req->num_rsscos_ctxs =
6285 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6286 } else {
6287 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6288 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6289 req->num_rsscos_ctxs = cpu_to_le16(1);
6290 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6291 bnxt_rfs_supported(bp))
6292 req->num_rsscos_ctxs =
6293 cpu_to_le16(ring_grps + 1);
6294 }
780baad4 6295 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4 6296 req->num_vnics = cpu_to_le16(vnics);
674f50a5 6297 }
4ed50ef4 6298 req->enables = cpu_to_le32(enables);
bbf33d1d 6299 return req;
4ed50ef4
MC
6300}
6301
bbf33d1d
EP
6302static struct hwrm_func_vf_cfg_input *
6303__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6304 int ring_grps, int cp_rings, int stats, int vnics)
4ed50ef4 6305{
bbf33d1d 6306 struct hwrm_func_vf_cfg_input *req;
4ed50ef4
MC
6307 u32 enables = 0;
6308
bbf33d1d
EP
6309 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
6310 return NULL;
6311
4ed50ef4 6312 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
41e8d798
MC
6313 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6314 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
3f93cd3f 6315 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
6316 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6317 enables |= tx_rings + ring_grps ?
3f93cd3f 6318 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6319 } else {
6320 enables |= cp_rings ?
3f93cd3f 6321 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6322 enables |= ring_grps ?
6323 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6324 }
4ed50ef4 6325 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
41e8d798 6326 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
4ed50ef4 6327
41e8d798 6328 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
4ed50ef4
MC
6329 req->num_tx_rings = cpu_to_le16(tx_rings);
6330 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
6331 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6332 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6333 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6334 } else {
6335 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6336 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6337 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6338 }
780baad4 6339 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4
MC
6340 req->num_vnics = cpu_to_le16(vnics);
6341
6342 req->enables = cpu_to_le32(enables);
bbf33d1d 6343 return req;
4ed50ef4
MC
6344}
6345
6346static int
6347bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 6348 int ring_grps, int cp_rings, int stats, int vnics)
4ed50ef4 6349{
bbf33d1d 6350 struct hwrm_func_cfg_input *req;
4ed50ef4
MC
6351 int rc;
6352
bbf33d1d
EP
6353 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6354 cp_rings, stats, vnics);
6355 if (!req)
6356 return -ENOMEM;
6357
6358 if (!req->enables) {
6359 hwrm_req_drop(bp, req);
391be5c2 6360 return 0;
bbf33d1d 6361 }
391be5c2 6362
bbf33d1d 6363 rc = hwrm_req_send(bp, req);
674f50a5 6364 if (rc)
d4f1420d 6365 return rc;
674f50a5
MC
6366
6367 if (bp->hwrm_spec_code < 0x10601)
6368 bp->hw_resc.resv_tx_rings = tx_rings;
6369
9f90445c 6370 return bnxt_hwrm_get_rings(bp);
674f50a5
MC
6371}
6372
6373static int
6374bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 6375 int ring_grps, int cp_rings, int stats, int vnics)
674f50a5 6376{
bbf33d1d 6377 struct hwrm_func_vf_cfg_input *req;
674f50a5
MC
6378 int rc;
6379
f1ca94de 6380 if (!BNXT_NEW_RM(bp)) {
674f50a5 6381 bp->hw_resc.resv_tx_rings = tx_rings;
391be5c2 6382 return 0;
674f50a5 6383 }
391be5c2 6384
bbf33d1d
EP
6385 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6386 cp_rings, stats, vnics);
6387 if (!req)
6388 return -ENOMEM;
6389
6390 rc = hwrm_req_send(bp, req);
674f50a5 6391 if (rc)
d4f1420d 6392 return rc;
674f50a5 6393
9f90445c 6394 return bnxt_hwrm_get_rings(bp);
674f50a5
MC
6395}
6396
6397static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
780baad4 6398 int cp, int stat, int vnic)
674f50a5
MC
6399{
6400 if (BNXT_PF(bp))
780baad4
VV
6401 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6402 vnic);
674f50a5 6403 else
780baad4
VV
6404 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6405 vnic);
674f50a5
MC
6406}
6407
b16b6891 6408int bnxt_nq_rings_in_use(struct bnxt *bp)
08654eb2
MC
6409{
6410 int cp = bp->cp_nr_rings;
6411 int ulp_msix, ulp_base;
6412
6413 ulp_msix = bnxt_get_ulp_msix_num(bp);
6414 if (ulp_msix) {
6415 ulp_base = bnxt_get_ulp_msix_base(bp);
6416 cp += ulp_msix;
6417 if ((ulp_base + ulp_msix) > cp)
6418 cp = ulp_base + ulp_msix;
6419 }
6420 return cp;
6421}
6422
c0b8cda0
MC
6423static int bnxt_cp_rings_in_use(struct bnxt *bp)
6424{
6425 int cp;
6426
6427 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6428 return bnxt_nq_rings_in_use(bp);
6429
6430 cp = bp->tx_nr_rings + bp->rx_nr_rings;
6431 return cp;
6432}
6433
780baad4
VV
6434static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6435{
d77b1ad8
MC
6436 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6437 int cp = bp->cp_nr_rings;
6438
6439 if (!ulp_stat)
6440 return cp;
6441
6442 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6443 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6444
6445 return cp + ulp_stat;
780baad4
VV
6446}
6447
b43b9f53
MC
6448/* Check if a default RSS map needs to be set up. This function is only
6449 * used on older firmware that does not require reserving RX rings.
6450 */
6451static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6452{
6453 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6454
6455 /* The RSS map is valid for RX rings set to resv_rx_rings */
6456 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6457 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6458 if (!netif_is_rxfh_configured(bp->dev))
6459 bnxt_set_dflt_rss_indir_tbl(bp);
6460 }
6461}
6462
4e41dc5d
MC
6463static bool bnxt_need_reserve_rings(struct bnxt *bp)
6464{
6465 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
fbcfc8e4 6466 int cp = bnxt_cp_rings_in_use(bp);
c0b8cda0 6467 int nq = bnxt_nq_rings_in_use(bp);
780baad4 6468 int rx = bp->rx_nr_rings, stat;
4e41dc5d
MC
6469 int vnic = 1, grp = rx;
6470
b43b9f53
MC
6471 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6472 bp->hwrm_spec_code >= 0x10601)
4e41dc5d
MC
6473 return true;
6474
b43b9f53
MC
 6475	/* Old firmware does not need RX ring reservations, but we still
 6476	 * need to set up a default RSS map when required. With new firmware
 6477	 * we go through RX ring reservations first and then set up the
 6478	 * RSS map for the successfully reserved RX rings when needed.
 6479	 */
6480 if (!BNXT_NEW_RM(bp)) {
6481 bnxt_check_rss_tbl_no_rmgr(bp);
6482 return false;
6483 }
41e8d798 6484 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
4e41dc5d
MC
6485 vnic = rx + 1;
6486 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6487 rx <<= 1;
780baad4 6488 stat = bnxt_get_func_stat_ctxs(bp);
b43b9f53
MC
6489 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6490 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6491 (hw_resc->resv_hw_ring_grps != grp &&
6492 !(bp->flags & BNXT_FLAG_CHIP_P5)))
4e41dc5d 6493 return true;
01989c6b
MC
6494 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6495 hw_resc->resv_irqs != nq)
6496 return true;
4e41dc5d
MC
6497 return false;
6498}
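/* Worked example (illustrative, editorial addition): on a pre-P5 chip with
 * RFS enabled, 8 RX rings and aggregation rings, the function compares against
 * vnic = 9 (one per RX ring plus the default), rx = 16 buffer rings (RX
 * doubled for the agg rings) and grp = 8 ring groups; a mismatch with any
 * reserved value triggers a new reservation.
 */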
6499
674f50a5
MC
6500static int __bnxt_reserve_rings(struct bnxt *bp)
6501{
6502 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
c0b8cda0 6503 int cp = bnxt_nq_rings_in_use(bp);
674f50a5
MC
6504 int tx = bp->tx_nr_rings;
6505 int rx = bp->rx_nr_rings;
674f50a5 6506 int grp, rx_rings, rc;
780baad4 6507 int vnic = 1, stat;
674f50a5 6508 bool sh = false;
674f50a5 6509
4e41dc5d 6510 if (!bnxt_need_reserve_rings(bp))
674f50a5
MC
6511 return 0;
6512
6513 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6514 sh = true;
41e8d798 6515 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
674f50a5
MC
6516 vnic = rx + 1;
6517 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6518 rx <<= 1;
674f50a5 6519 grp = bp->rx_nr_rings;
780baad4 6520 stat = bnxt_get_func_stat_ctxs(bp);
674f50a5 6521
780baad4 6522 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
391be5c2
MC
6523 if (rc)
6524 return rc;
6525
674f50a5 6526 tx = hw_resc->resv_tx_rings;
f1ca94de 6527 if (BNXT_NEW_RM(bp)) {
674f50a5 6528 rx = hw_resc->resv_rx_rings;
c0b8cda0 6529 cp = hw_resc->resv_irqs;
674f50a5
MC
6530 grp = hw_resc->resv_hw_ring_grps;
6531 vnic = hw_resc->resv_vnics;
780baad4 6532 stat = hw_resc->resv_stat_ctxs;
674f50a5
MC
6533 }
6534
6535 rx_rings = rx;
6536 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6537 if (rx >= 2) {
6538 rx_rings = rx >> 1;
6539 } else {
6540 if (netif_running(bp->dev))
6541 return -ENOMEM;
6542
6543 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6544 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6545 bp->dev->hw_features &= ~NETIF_F_LRO;
6546 bp->dev->features &= ~NETIF_F_LRO;
6547 bnxt_set_ring_params(bp);
6548 }
6549 }
6550 rx_rings = min_t(int, rx_rings, grp);
780baad4
VV
6551 cp = min_t(int, cp, bp->cp_nr_rings);
6552 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6553 stat -= bnxt_get_ulp_stat_ctxs(bp);
6554 cp = min_t(int, cp, stat);
674f50a5
MC
6555 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6556 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6557 rx = rx_rings << 1;
6558 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6559 bp->tx_nr_rings = tx;
bd3191b5
MC
6560
6561 /* If we cannot reserve all the RX rings, reset the RSS map only
6562 * if absolutely necessary
6563 */
6564 if (rx_rings != bp->rx_nr_rings) {
6565 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6566 rx_rings, bp->rx_nr_rings);
4b70dce2 6567 if (netif_is_rxfh_configured(bp->dev) &&
bd3191b5
MC
6568 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6569 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6570 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6571 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6572 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6573 }
6574 }
674f50a5
MC
6575 bp->rx_nr_rings = rx_rings;
6576 bp->cp_nr_rings = cp;
6577
780baad4 6578 if (!tx || !rx || !cp || !grp || !vnic || !stat)
674f50a5
MC
6579 return -ENOMEM;
6580
5fa65524
EP
6581 if (!netif_is_rxfh_configured(bp->dev))
6582 bnxt_set_dflt_rss_indir_tbl(bp);
6583
391be5c2
MC
6584 return rc;
6585}
6586
8f23d638 6587static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6588 int ring_grps, int cp_rings, int stats,
6589 int vnics)
98fdbe73 6590{
bbf33d1d 6591 struct hwrm_func_vf_cfg_input *req;
6fc2ffdf 6592 u32 flags;
98fdbe73 6593
f1ca94de 6594 if (!BNXT_NEW_RM(bp))
98fdbe73
MC
6595 return 0;
6596
bbf33d1d
EP
6597 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6598 cp_rings, stats, vnics);
8f23d638
MC
6599 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6600 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6601 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638 6602 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
41e8d798
MC
6603 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6604 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6605 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6606 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8f23d638 6607
bbf33d1d
EP
6608 req->flags = cpu_to_le32(flags);
6609 return hwrm_req_send_silent(bp, req);
8f23d638
MC
6610}
6611
6612static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6613 int ring_grps, int cp_rings, int stats,
6614 int vnics)
8f23d638 6615{
bbf33d1d 6616 struct hwrm_func_cfg_input *req;
6fc2ffdf 6617 u32 flags;
98fdbe73 6618
bbf33d1d
EP
6619 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6620 cp_rings, stats, vnics);
8f23d638 6621 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
41e8d798 6622 if (BNXT_NEW_RM(bp)) {
8f23d638
MC
6623 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6624 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638
MC
6625 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6626 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
41e8d798 6627 if (bp->flags & BNXT_FLAG_CHIP_P5)
0b815023
MC
6628 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6629 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
41e8d798
MC
6630 else
6631 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6632 }
6fc2ffdf 6633
bbf33d1d
EP
6634 req->flags = cpu_to_le32(flags);
6635 return hwrm_req_send_silent(bp, req);
98fdbe73
MC
6636}
6637
8f23d638 6638static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6639 int ring_grps, int cp_rings, int stats,
6640 int vnics)
8f23d638
MC
6641{
6642 if (bp->hwrm_spec_code < 0x10801)
6643 return 0;
6644
6645 if (BNXT_PF(bp))
6646 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
780baad4
VV
6647 ring_grps, cp_rings, stats,
6648 vnics);
8f23d638
MC
6649
6650 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
780baad4 6651 cp_rings, stats, vnics);
8f23d638
MC
6652}
6653
74706afa
MC
6654static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6655{
74706afa 6656 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
bbf33d1d
EP
6657 struct hwrm_ring_aggint_qcaps_output *resp;
6658 struct hwrm_ring_aggint_qcaps_input *req;
74706afa
MC
6659 int rc;
6660
6661 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6662 coal_cap->num_cmpl_dma_aggr_max = 63;
6663 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6664 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6665 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6666 coal_cap->int_lat_tmr_min_max = 65535;
6667 coal_cap->int_lat_tmr_max_max = 65535;
6668 coal_cap->num_cmpl_aggr_int_max = 65535;
6669 coal_cap->timer_units = 80;
6670
6671 if (bp->hwrm_spec_code < 0x10902)
6672 return;
6673
bbf33d1d
EP
6674 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
6675 return;
6676
6677 resp = hwrm_req_hold(bp, req);
6678 rc = hwrm_req_send_silent(bp, req);
74706afa
MC
6679 if (!rc) {
6680 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
58590c8d 6681 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
74706afa
MC
6682 coal_cap->num_cmpl_dma_aggr_max =
6683 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6684 coal_cap->num_cmpl_dma_aggr_during_int_max =
6685 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6686 coal_cap->cmpl_aggr_dma_tmr_max =
6687 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6688 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6689 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6690 coal_cap->int_lat_tmr_min_max =
6691 le16_to_cpu(resp->int_lat_tmr_min_max);
6692 coal_cap->int_lat_tmr_max_max =
6693 le16_to_cpu(resp->int_lat_tmr_max_max);
6694 coal_cap->num_cmpl_aggr_int_max =
6695 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6696 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6697 }
bbf33d1d 6698 hwrm_req_drop(bp, req);
74706afa
MC
6699}
6700
6701static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6702{
6703 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6704
6705 return usec * 1000 / coal_cap->timer_units;
6706}
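/* Illustrative conversion (editorial): with the default timer_units of 80,
 * which the formula above treats as nanoseconds per hardware unit, a 12 usec
 * coalescing tick becomes 12 * 1000 / 80 = 150 timer units.
 */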
6707
6708static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6709 struct bnxt_coal *hw_coal,
bb053f52
MC
6710 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6711{
74706afa 6712 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
df78ea22 6713 u16 val, tmr, max, flags = hw_coal->flags;
74706afa 6714 u32 cmpl_params = coal_cap->cmpl_params;
f8503969
MC
6715
6716 max = hw_coal->bufs_per_record * 128;
6717 if (hw_coal->budget)
6718 max = hw_coal->bufs_per_record * hw_coal->budget;
74706afa 6719 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
f8503969
MC
6720
6721 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6722 req->num_cmpl_aggr_int = cpu_to_le16(val);
b153cbc5 6723
74706afa 6724 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
f8503969
MC
6725 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6726
74706afa
MC
6727 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6728 coal_cap->num_cmpl_dma_aggr_during_int_max);
f8503969
MC
6729 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6730
74706afa
MC
6731 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6732 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
f8503969
MC
6733 req->int_lat_tmr_max = cpu_to_le16(tmr);
6734
6735 /* min timer set to 1/2 of interrupt timer */
74706afa
MC
6736 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6737 val = tmr / 2;
6738 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6739 req->int_lat_tmr_min = cpu_to_le16(val);
6740 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6741 }
f8503969
MC
6742
6743 /* buf timer set to 1/4 of interrupt timer */
74706afa 6744 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
f8503969
MC
6745 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6746
74706afa
MC
6747 if (cmpl_params &
6748 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6749 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6750 val = clamp_t(u16, tmr, 1,
6751 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6adc4601 6752 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
74706afa
MC
6753 req->enables |=
6754 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6755 }
f8503969 6756
74706afa
MC
6757 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6758 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
f8503969 6759 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
bb053f52 6760 req->flags = cpu_to_le16(flags);
74706afa 6761 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
bb053f52
MC
6762}
6763
58590c8d
MC
6764static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6765 struct bnxt_coal *hw_coal)
6766{
bbf33d1d 6767 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
58590c8d
MC
6768 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6769 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6770 u32 nq_params = coal_cap->nq_params;
6771 u16 tmr;
bbf33d1d 6772 int rc;
58590c8d
MC
6773
6774 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6775 return 0;
6776
bbf33d1d
EP
6777 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6778 if (rc)
6779 return rc;
6780
6781 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6782 req->flags =
58590c8d
MC
6783 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6784
6785 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6786 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
bbf33d1d
EP
6787 req->int_lat_tmr_min = cpu_to_le16(tmr);
6788 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6789 return hwrm_req_send(bp, req);
58590c8d
MC
6790}
6791
6a8788f2
AG
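/* Apply the per-ring RX coalescing values stored in the ring's
 * rx_ring_coal to the RX completion ring of this NAPI instance.
 */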
6792int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6793{
bbf33d1d 6794 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
6a8788f2
AG
6795 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6796 struct bnxt_coal coal;
bbf33d1d 6797 int rc;
6a8788f2
AG
6798

6799 /* Tick values in microseconds.
6800 * 1 coal_buf x bufs_per_record = 1 completion record.
6801 */
6802 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6803
6804 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6805 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6806
6807 if (!bnapi->rx_ring)
6808 return -ENODEV;
6809
bbf33d1d
EP
6810 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6811 if (rc)
6812 return rc;
6a8788f2 6813
bbf33d1d 6814 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
6a8788f2 6815
bbf33d1d 6816 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6a8788f2 6817
bbf33d1d 6818 return hwrm_req_send(bp, req_rx);
6a8788f2
AG
6819}
6820
c0c050c5
MC
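/* Send the global RX and TX coalescing settings to firmware for every
 * completion ring.  On P5 chips, where one NAPI instance may own separate
 * RX and TX completion rings, both rings are programmed and the NQ timer
 * is updated as well.
 */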
6821int bnxt_hwrm_set_coal(struct bnxt *bp)
6822{
bbf33d1d
EP
6823 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
6824 *req;
6825 int i, rc;
6826
6827 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6828 if (rc)
6829 return rc;
c0c050c5 6830
bbf33d1d
EP
6831 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6832 if (rc) {
6833 hwrm_req_drop(bp, req_rx);
6834 return rc;
6835 }
c0c050c5 6836
bbf33d1d
EP
6837 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
6838 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
c0c050c5 6839
bbf33d1d
EP
6840 hwrm_req_hold(bp, req_rx);
6841 hwrm_req_hold(bp, req_tx);
c0c050c5 6842 for (i = 0; i < bp->cp_nr_rings; i++) {
dfc9c94a 6843 struct bnxt_napi *bnapi = bp->bnapi[i];
58590c8d 6844 struct bnxt_coal *hw_coal;
2c61d211 6845 u16 ring_id;
c0c050c5 6846
bbf33d1d 6847 req = req_rx;
2c61d211
MC
6848 if (!bnapi->rx_ring) {
6849 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
bbf33d1d 6850 req = req_tx;
2c61d211
MC
6851 } else {
6852 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6853 }
6854 req->ring_id = cpu_to_le16(ring_id);
dfc9c94a 6855
bbf33d1d 6856 rc = hwrm_req_send(bp, req);
c0c050c5
MC
6857 if (rc)
6858 break;
58590c8d
MC
6859
6860 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6861 continue;
6862
6863 if (bnapi->rx_ring && bnapi->tx_ring) {
bbf33d1d 6864 req = req_tx;
58590c8d
MC
6865 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6866 req->ring_id = cpu_to_le16(ring_id);
bbf33d1d 6867 rc = hwrm_req_send(bp, req);
58590c8d
MC
6868 if (rc)
6869 break;
6870 }
6871 if (bnapi->rx_ring)
6872 hw_coal = &bp->rx_coal;
6873 else
6874 hw_coal = &bp->tx_coal;
6875 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
c0c050c5 6876 }
bbf33d1d
EP
6877 hwrm_req_drop(bp, req_rx);
6878 hwrm_req_drop(bp, req_tx);
c0c050c5
MC
6879 return rc;
6880}
6881
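/* Free the firmware statistics context of each completion ring.  On older
 * firmware (major version <= 20) the counters are also cleared before the
 * context is freed.
 */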
3d061591 6882static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
c0c050c5 6883{
bbf33d1d
EP
6884 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
6885 struct hwrm_stat_ctx_free_input *req;
3d061591 6886 int i;
c0c050c5
MC
6887
6888 if (!bp->bnapi)
3d061591 6889 return;
c0c050c5 6890
3e8060fa 6891 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3d061591 6892 return;
3e8060fa 6893
bbf33d1d
EP
6894 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
6895 return;
6896 if (BNXT_FW_MAJ(bp) <= 20) {
6897 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
6898 hwrm_req_drop(bp, req);
6899 return;
6900 }
6901 hwrm_req_hold(bp, req0);
6902 }
6903 hwrm_req_hold(bp, req);
c0c050c5
MC
6904 for (i = 0; i < bp->cp_nr_rings; i++) {
6905 struct bnxt_napi *bnapi = bp->bnapi[i];
6906 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6907
6908 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
bbf33d1d
EP
6909 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6910 if (req0) {
6911 req0->stat_ctx_id = req->stat_ctx_id;
6912 hwrm_req_send(bp, req0);
c2dec363 6913 }
bbf33d1d 6914 hwrm_req_send(bp, req);
c0c050c5
MC
6915
6916 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6917 }
6918 }
bbf33d1d
EP
6919 hwrm_req_drop(bp, req);
6920 if (req0)
6921 hwrm_req_drop(bp, req0);
c0c050c5
MC
6922}
6923
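/* Allocate one firmware statistics context per completion ring and record
 * the returned context ID in both the ring and its ring group.
 */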
6924static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6925{
bbf33d1d
EP
6926 struct hwrm_stat_ctx_alloc_output *resp;
6927 struct hwrm_stat_ctx_alloc_input *req;
6928 int rc, i;
c0c050c5 6929
3e8060fa
PS
6930 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6931 return 0;
6932
bbf33d1d
EP
6933 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
6934 if (rc)
6935 return rc;
c0c050c5 6936
bbf33d1d
EP
6937 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6938 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
c0c050c5 6939
bbf33d1d 6940 resp = hwrm_req_hold(bp, req);
c0c050c5
MC
6941 for (i = 0; i < bp->cp_nr_rings; i++) {
6942 struct bnxt_napi *bnapi = bp->bnapi[i];
6943 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6944
bbf33d1d 6945 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
c0c050c5 6946
bbf33d1d 6947 rc = hwrm_req_send(bp, req);
c0c050c5
MC
6948 if (rc)
6949 break;
6950
6951 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6952
6953 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6954 }
bbf33d1d 6955 hwrm_req_drop(bp, req);
89aa8445 6956 return rc;
c0c050c5
MC
6957}
6958
cf6645f8
MC
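/* Query the current function configuration: VF VLAN, multi-host and
 * DCBX/LLDP agent flags, NPAR partition type, bridge mode, maximum MTU and
 * the usable size of the L2 doorbell BAR.
 */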
6959static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6960{
bbf33d1d
EP
6961 struct hwrm_func_qcfg_output *resp;
6962 struct hwrm_func_qcfg_input *req;
8ae24738 6963 u32 min_db_offset = 0;
9315edca 6964 u16 flags;
cf6645f8
MC
6965 int rc;
6966
bbf33d1d
EP
6967 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6968 if (rc)
6969 return rc;
6970
6971 req->fid = cpu_to_le16(0xffff);
6972 resp = hwrm_req_hold(bp, req);
6973 rc = hwrm_req_send(bp, req);
cf6645f8
MC
6974 if (rc)
6975 goto func_qcfg_exit;
6976
6977#ifdef CONFIG_BNXT_SRIOV
6978 if (BNXT_VF(bp)) {
cf6645f8
MC
6979 struct bnxt_vf_info *vf = &bp->vf;
6980
6981 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
230d1f0d
MC
6982 } else {
6983 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
cf6645f8
MC
6984 }
6985#endif
9315edca
MC
6986 flags = le16_to_cpu(resp->flags);
6987 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6988 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
97381a18 6989 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
9315edca 6990 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
97381a18 6991 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
9315edca 6992 }
85036aee 6993 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) {
9315edca 6994 bp->flags |= BNXT_FLAG_MULTI_HOST;
85036aee
PC
6995 if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
6996 bp->fw_cap &= ~BNXT_FW_CAP_PTP_RTC;
6997 }
8d4bd96b
MC
6998 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6999 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
bc39f885 7000
567b2abe
SB
7001 switch (resp->port_partition_type) {
7002 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
7003 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
7004 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
7005 bp->port_partition_type = resp->port_partition_type;
7006 break;
7007 }
32e8239c
MC
7008 if (bp->hwrm_spec_code < 0x10707 ||
7009 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
7010 bp->br_mode = BRIDGE_MODE_VEB;
7011 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
7012 bp->br_mode = BRIDGE_MODE_VEPA;
7013 else
7014 bp->br_mode = BRIDGE_MODE_UNDEF;
cf6645f8 7015
7eb9bb3a
MC
7016 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
7017 if (!bp->max_mtu)
7018 bp->max_mtu = BNXT_MAX_MTU;
7019
8ae24738
MC
7020 if (bp->db_size)
7021 goto func_qcfg_exit;
7022
7023 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7024 if (BNXT_PF(bp))
7025 min_db_offset = DB_PF_OFFSET_P5;
7026 else
7027 min_db_offset = DB_VF_OFFSET_P5;
7028 }
7029 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
7030 1024);
7031 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
7032 bp->db_size <= min_db_offset)
7033 bp->db_size = pci_resource_len(bp->pdev, 2);
7034
cf6645f8 7035func_qcfg_exit:
bbf33d1d 7036 hwrm_req_drop(bp, req);
cf6645f8
MC
7037 return rc;
7038}
7039
e9696ff3
MC
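/* Record the context-memory init value and the per-type init offsets
 * reported in the backing store qcaps response so that newly allocated
 * context pages can be initialized correctly.
 */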
7040static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
7041 struct hwrm_func_backing_store_qcaps_output *resp)
7042{
7043 struct bnxt_mem_init *mem_init;
41435c39 7044 u16 init_mask;
e9696ff3 7045 u8 init_val;
41435c39 7046 u8 *offset;
e9696ff3
MC
7047 int i;
7048
7049 init_val = resp->ctx_kind_initializer;
41435c39
MC
7050 init_mask = le16_to_cpu(resp->ctx_init_mask);
7051 offset = &resp->qp_init_offset;
7052 mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7053 for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
e9696ff3 7054 mem_init->init_val = init_val;
41435c39
MC
7055 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
7056 if (!init_mask)
7057 continue;
7058 if (i == BNXT_CTX_MEM_INIT_STAT)
7059 offset = &resp->stat_init_offset;
7060 if (init_mask & (1 << i))
7061 mem_init->offset = *offset * 4;
7062 else
7063 mem_init->init_val = 0;
7064 }
7065 ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
7066 ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
7067 ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
7068 ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
7069 ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
7070 ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
e9696ff3
MC
7071}
7072
98f04cf0
MC
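/* Query the host backing store requirements for each context type
 * (QP, SRQ, CQ, VNIC, stats, TQM, MRAV, TIM) and cache the entry sizes
 * and limits in bp->ctx.
 */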
7073static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
7074{
bbf33d1d
EP
7075 struct hwrm_func_backing_store_qcaps_output *resp;
7076 struct hwrm_func_backing_store_qcaps_input *req;
98f04cf0
MC
7077 int rc;
7078
7079 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
7080 return 0;
7081
bbf33d1d
EP
7082 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
7083 if (rc)
7084 return rc;
7085
7086 resp = hwrm_req_hold(bp, req);
7087 rc = hwrm_req_send_silent(bp, req);
98f04cf0
MC
7088 if (!rc) {
7089 struct bnxt_ctx_pg_info *ctx_pg;
7090 struct bnxt_ctx_mem_info *ctx;
ac3158cb 7091 int i, tqm_rings;
98f04cf0
MC
7092
7093 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
7094 if (!ctx) {
7095 rc = -ENOMEM;
7096 goto ctx_err;
7097 }
98f04cf0
MC
7098 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
7099 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
7100 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
7101 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
7102 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
7103 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
7104 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
7105 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
7106 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
7107 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
7108 ctx->vnic_max_vnic_entries =
7109 le16_to_cpu(resp->vnic_max_vnic_entries);
7110 ctx->vnic_max_ring_table_entries =
7111 le16_to_cpu(resp->vnic_max_ring_table_entries);
7112 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
7113 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
7114 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
7115 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
7116 ctx->tqm_min_entries_per_ring =
7117 le32_to_cpu(resp->tqm_min_entries_per_ring);
7118 ctx->tqm_max_entries_per_ring =
7119 le32_to_cpu(resp->tqm_max_entries_per_ring);
7120 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
7121 if (!ctx->tqm_entries_multiple)
7122 ctx->tqm_entries_multiple = 1;
7123 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
7124 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
53579e37
DS
7125 ctx->mrav_num_entries_units =
7126 le16_to_cpu(resp->mrav_num_entries_units);
98f04cf0
MC
7127 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
7128 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
e9696ff3
MC
7129
7130 bnxt_init_ctx_initializer(ctx, resp);
7131
ac3158cb
MC
7132 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
7133 if (!ctx->tqm_fp_rings_count)
7134 ctx->tqm_fp_rings_count = bp->max_q;
a029a2fe
MC
7135 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
7136 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
ac3158cb 7137
a029a2fe 7138 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
ac3158cb
MC
7139 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
7140 if (!ctx_pg) {
7141 kfree(ctx);
7142 rc = -ENOMEM;
7143 goto ctx_err;
7144 }
7145 for (i = 0; i < tqm_rings; i++, ctx_pg++)
7146 ctx->tqm_mem[i] = ctx_pg;
7147 bp->ctx = ctx;
98f04cf0
MC
7148 } else {
7149 rc = 0;
7150 }
7151ctx_err:
bbf33d1d 7152 hwrm_req_drop(bp, req);
98f04cf0
MC
7153 return rc;
7154}
7155
1b9394e5
MC
7156static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
7157 __le64 *pg_dir)
7158{
be6d755f
EP
7159 if (!rmem->nr_pages)
7160 return;
7161
702279d2 7162 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
08fe9d18
MC
7163 if (rmem->depth >= 1) {
7164 if (rmem->depth == 2)
7165 *pg_attr |= 2;
7166 else
7167 *pg_attr |= 1;
1b9394e5
MC
7168 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
7169 } else {
7170 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
7171 }
7172}
7173
7174#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
7175 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
7176 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
7177 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
7178 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
7179 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
7180
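/* Tell firmware where the host backing store memory for each enabled
 * context type lives: entry counts, entry sizes, page size, page-table
 * depth and the page directory address of every region.
 */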
7181static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
7182{
bbf33d1d 7183 struct hwrm_func_backing_store_cfg_input *req;
1b9394e5
MC
7184 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7185 struct bnxt_ctx_pg_info *ctx_pg;
bbf33d1d
EP
7186 void **__req = (void **)&req;
7187 u32 req_len = sizeof(*req);
1b9394e5
MC
7188 __le32 *num_entries;
7189 __le64 *pg_dir;
53579e37 7190 u32 flags = 0;
1b9394e5 7191 u8 *pg_attr;
1b9394e5 7192 u32 ena;
bbf33d1d 7193 int rc;
9f90445c 7194 int i;
1b9394e5
MC
7195
7196 if (!ctx)
7197 return 0;
7198
16db6323
MC
7199 if (req_len > bp->hwrm_max_ext_req_len)
7200 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
bbf33d1d
EP
7201 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
7202 if (rc)
7203 return rc;
1b9394e5 7204
bbf33d1d 7205 req->enables = cpu_to_le32(enables);
1b9394e5
MC
7206 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
7207 ctx_pg = &ctx->qp_mem;
bbf33d1d
EP
7208 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
7209 req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
7210 req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
7211 req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
1b9394e5 7212 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7213 &req->qpc_pg_size_qpc_lvl,
7214 &req->qpc_page_dir);
1b9394e5
MC
7215 }
7216 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
7217 ctx_pg = &ctx->srq_mem;
bbf33d1d
EP
7218 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
7219 req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7220 req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
1b9394e5 7221 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7222 &req->srq_pg_size_srq_lvl,
7223 &req->srq_page_dir);
1b9394e5
MC
7224 }
7225 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7226 ctx_pg = &ctx->cq_mem;
bbf33d1d
EP
7227 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
7228 req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7229 req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7230 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7231 &req->cq_pg_size_cq_lvl,
7232 &req->cq_page_dir);
1b9394e5
MC
7233 }
7234 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7235 ctx_pg = &ctx->vnic_mem;
bbf33d1d 7236 req->vnic_num_vnic_entries =
1b9394e5 7237 cpu_to_le16(ctx->vnic_max_vnic_entries);
bbf33d1d 7238 req->vnic_num_ring_table_entries =
1b9394e5 7239 cpu_to_le16(ctx->vnic_max_ring_table_entries);
bbf33d1d 7240 req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
1b9394e5 7241 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7242 &req->vnic_pg_size_vnic_lvl,
7243 &req->vnic_page_dir);
1b9394e5
MC
7244 }
7245 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7246 ctx_pg = &ctx->stat_mem;
bbf33d1d
EP
7247 req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7248 req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
1b9394e5 7249 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7250 &req->stat_pg_size_stat_lvl,
7251 &req->stat_page_dir);
1b9394e5 7252 }
cf6daed0
MC
7253 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7254 ctx_pg = &ctx->mrav_mem;
bbf33d1d 7255 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
53579e37
DS
7256 if (ctx->mrav_num_entries_units)
7257 flags |=
7258 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
bbf33d1d 7259 req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
cf6daed0 7260 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7261 &req->mrav_pg_size_mrav_lvl,
7262 &req->mrav_page_dir);
cf6daed0
MC
7263 }
7264 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7265 ctx_pg = &ctx->tim_mem;
bbf33d1d
EP
7266 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
7267 req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
cf6daed0 7268 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7269 &req->tim_pg_size_tim_lvl,
7270 &req->tim_page_dir);
cf6daed0 7271 }
bbf33d1d
EP
7272 for (i = 0, num_entries = &req->tqm_sp_num_entries,
7273 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
7274 pg_dir = &req->tqm_sp_page_dir,
1b9394e5 7275 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
a029a2fe
MC
7276 i < BNXT_MAX_TQM_RINGS;
7277 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
1b9394e5
MC
7278 if (!(enables & ena))
7279 continue;
7280
bbf33d1d 7281 req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
1b9394e5
MC
7282 ctx_pg = ctx->tqm_mem[i];
7283 *num_entries = cpu_to_le32(ctx_pg->entries);
7284 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7285 }
bbf33d1d
EP
7286 req->flags = cpu_to_le32(flags);
7287 return hwrm_req_send(bp, req);
1b9394e5
MC
7288}
7289
98f04cf0 7290static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
08fe9d18 7291 struct bnxt_ctx_pg_info *ctx_pg)
98f04cf0
MC
7292{
7293 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7294
98f04cf0
MC
7295 rmem->page_size = BNXT_PAGE_SIZE;
7296 rmem->pg_arr = ctx_pg->ctx_pg_arr;
7297 rmem->dma_arr = ctx_pg->ctx_dma_arr;
1b9394e5 7298 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
08fe9d18
MC
7299 if (rmem->depth >= 1)
7300 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
98f04cf0
MC
7301 return bnxt_alloc_ring(bp, rmem);
7302}
7303
08fe9d18
MC
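/* Allocate the pages that back one context memory region.  Regions larger
 * than MAX_CTX_PAGES (or requested with depth > 1) use a two-level page
 * table; smaller regions use a single level.
 */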
7304static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7305 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
e9696ff3 7306 u8 depth, struct bnxt_mem_init *mem_init)
08fe9d18
MC
7307{
7308 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7309 int rc;
7310
7311 if (!mem_size)
bbf211b1 7312 return -EINVAL;
08fe9d18
MC
7313
7314 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7315 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7316 ctx_pg->nr_pages = 0;
7317 return -EINVAL;
7318 }
7319 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7320 int nr_tbls, i;
7321
7322 rmem->depth = 2;
7323 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7324 GFP_KERNEL);
7325 if (!ctx_pg->ctx_pg_tbl)
7326 return -ENOMEM;
7327 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7328 rmem->nr_pages = nr_tbls;
7329 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7330 if (rc)
7331 return rc;
7332 for (i = 0; i < nr_tbls; i++) {
7333 struct bnxt_ctx_pg_info *pg_tbl;
7334
7335 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7336 if (!pg_tbl)
7337 return -ENOMEM;
7338 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7339 rmem = &pg_tbl->ring_mem;
7340 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7341 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7342 rmem->depth = 1;
7343 rmem->nr_pages = MAX_CTX_PAGES;
e9696ff3 7344 rmem->mem_init = mem_init;
6ef982de
MC
7345 if (i == (nr_tbls - 1)) {
7346 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7347
7348 if (rem)
7349 rmem->nr_pages = rem;
7350 }
08fe9d18
MC
7351 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7352 if (rc)
7353 break;
7354 }
7355 } else {
7356 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7357 if (rmem->nr_pages > 1 || depth)
7358 rmem->depth = 1;
e9696ff3 7359 rmem->mem_init = mem_init;
08fe9d18
MC
7360 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7361 }
7362 return rc;
7363}
7364
7365static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7366 struct bnxt_ctx_pg_info *ctx_pg)
7367{
7368 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7369
7370 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7371 ctx_pg->ctx_pg_tbl) {
7372 int i, nr_tbls = rmem->nr_pages;
7373
7374 for (i = 0; i < nr_tbls; i++) {
7375 struct bnxt_ctx_pg_info *pg_tbl;
7376 struct bnxt_ring_mem_info *rmem2;
7377
7378 pg_tbl = ctx_pg->ctx_pg_tbl[i];
7379 if (!pg_tbl)
7380 continue;
7381 rmem2 = &pg_tbl->ring_mem;
7382 bnxt_free_ring(bp, rmem2);
7383 ctx_pg->ctx_pg_arr[i] = NULL;
7384 kfree(pg_tbl);
7385 ctx_pg->ctx_pg_tbl[i] = NULL;
7386 }
7387 kfree(ctx_pg->ctx_pg_tbl);
7388 ctx_pg->ctx_pg_tbl = NULL;
7389 }
7390 bnxt_free_ring(bp, rmem);
7391 ctx_pg->nr_pages = 0;
7392}
7393
228ea8c1 7394void bnxt_free_ctx_mem(struct bnxt *bp)
98f04cf0
MC
7395{
7396 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7397 int i;
7398
7399 if (!ctx)
7400 return;
7401
7402 if (ctx->tqm_mem[0]) {
ac3158cb 7403 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
08fe9d18 7404 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
98f04cf0
MC
7405 kfree(ctx->tqm_mem[0]);
7406 ctx->tqm_mem[0] = NULL;
7407 }
7408
cf6daed0
MC
7409 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7410 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
08fe9d18
MC
7411 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7412 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7413 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7414 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7415 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
98f04cf0
MC
7416 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7417}
7418
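/* Size and allocate all host backing store regions from the limits returned
 * by bnxt_hwrm_func_backing_store_qcaps(), adding extra QP/SRQ entries and
 * the MRAV/TIM regions only when RoCE is supported, then push the resulting
 * layout to firmware.
 */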
7419static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7420{
7421 struct bnxt_ctx_pg_info *ctx_pg;
7422 struct bnxt_ctx_mem_info *ctx;
e9696ff3 7423 struct bnxt_mem_init *init;
1b9394e5 7424 u32 mem_size, ena, entries;
c7dd7ab4 7425 u32 entries_sp, min;
53579e37 7426 u32 num_mr, num_ah;
cf6daed0
MC
7427 u32 extra_srqs = 0;
7428 u32 extra_qps = 0;
7429 u8 pg_lvl = 1;
98f04cf0
MC
7430 int i, rc;
7431
7432 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7433 if (rc) {
7434 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7435 rc);
7436 return rc;
7437 }
7438 ctx = bp->ctx;
7439 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7440 return 0;
7441
d629522e 7442 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
cf6daed0
MC
7443 pg_lvl = 2;
7444 extra_qps = 65536;
7445 extra_srqs = 8192;
7446 }
7447
98f04cf0 7448 ctx_pg = &ctx->qp_mem;
cf6daed0
MC
7449 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7450 extra_qps;
be6d755f
EP
7451 if (ctx->qp_entry_size) {
7452 mem_size = ctx->qp_entry_size * ctx_pg->entries;
e9696ff3
MC
7453 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7454 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7455 if (rc)
7456 return rc;
7457 }
98f04cf0
MC
7458
7459 ctx_pg = &ctx->srq_mem;
cf6daed0 7460 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
be6d755f
EP
7461 if (ctx->srq_entry_size) {
7462 mem_size = ctx->srq_entry_size * ctx_pg->entries;
e9696ff3
MC
7463 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7464 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7465 if (rc)
7466 return rc;
7467 }
98f04cf0
MC
7468
7469 ctx_pg = &ctx->cq_mem;
cf6daed0 7470 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
be6d755f
EP
7471 if (ctx->cq_entry_size) {
7472 mem_size = ctx->cq_entry_size * ctx_pg->entries;
e9696ff3
MC
7473 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7474 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7475 if (rc)
7476 return rc;
7477 }
98f04cf0
MC
7478
7479 ctx_pg = &ctx->vnic_mem;
7480 ctx_pg->entries = ctx->vnic_max_vnic_entries +
7481 ctx->vnic_max_ring_table_entries;
be6d755f
EP
7482 if (ctx->vnic_entry_size) {
7483 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
e9696ff3
MC
7484 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7485 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
be6d755f
EP
7486 if (rc)
7487 return rc;
7488 }
98f04cf0
MC
7489
7490 ctx_pg = &ctx->stat_mem;
7491 ctx_pg->entries = ctx->stat_max_entries;
be6d755f
EP
7492 if (ctx->stat_entry_size) {
7493 mem_size = ctx->stat_entry_size * ctx_pg->entries;
e9696ff3
MC
7494 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7495 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
be6d755f
EP
7496 if (rc)
7497 return rc;
7498 }
98f04cf0 7499
cf6daed0
MC
7500 ena = 0;
7501 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7502 goto skip_rdma;
7503
7504 ctx_pg = &ctx->mrav_mem;
53579e37
DS
7505 /* 128K extra is needed to accommodate static AH context
7506 * allocation by f/w.
7507 */
7508 num_mr = 1024 * 256;
7509 num_ah = 1024 * 128;
7510 ctx_pg->entries = num_mr + num_ah;
be6d755f
EP
7511 if (ctx->mrav_entry_size) {
7512 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
e9696ff3
MC
7513 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7514 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
be6d755f
EP
7515 if (rc)
7516 return rc;
7517 }
cf6daed0 7518 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
53579e37
DS
7519 if (ctx->mrav_num_entries_units)
7520 ctx_pg->entries =
7521 ((num_mr / ctx->mrav_num_entries_units) << 16) |
7522 (num_ah / ctx->mrav_num_entries_units);
cf6daed0
MC
7523
7524 ctx_pg = &ctx->tim_mem;
7525 ctx_pg->entries = ctx->qp_mem.entries;
be6d755f
EP
7526 if (ctx->tim_entry_size) {
7527 mem_size = ctx->tim_entry_size * ctx_pg->entries;
e9696ff3 7528 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
be6d755f
EP
7529 if (rc)
7530 return rc;
7531 }
cf6daed0
MC
7532 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7533
7534skip_rdma:
c7dd7ab4
MC
7535 min = ctx->tqm_min_entries_per_ring;
7536 entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7537 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7538 entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
c12e1643 7539 entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
98f04cf0 7540 entries = roundup(entries, ctx->tqm_entries_multiple);
c7dd7ab4 7541 entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
ac3158cb 7542 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
98f04cf0 7543 ctx_pg = ctx->tqm_mem[i];
c7dd7ab4 7544 ctx_pg->entries = i ? entries : entries_sp;
be6d755f
EP
7545 if (ctx->tqm_entry_size) {
7546 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7547 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
e9696ff3 7548 NULL);
be6d755f
EP
7549 if (rc)
7550 return rc;
7551 }
1b9394e5 7552 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
98f04cf0 7553 }
1b9394e5
MC
7554 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7555 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
0b5b561c 7556 if (rc) {
1b9394e5
MC
7557 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7558 rc);
0b5b561c
MC
7559 return rc;
7560 }
7561 ctx->flags |= BNXT_CTX_FLAG_INITED;
98f04cf0
MC
7562 return 0;
7563}
7564
db4723b3 7565int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
be0dd9c4 7566{
bbf33d1d
EP
7567 struct hwrm_func_resource_qcaps_output *resp;
7568 struct hwrm_func_resource_qcaps_input *req;
be0dd9c4
MC
7569 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7570 int rc;
7571
bbf33d1d
EP
7572 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
7573 if (rc)
7574 return rc;
be0dd9c4 7575
bbf33d1d
EP
7576 req->fid = cpu_to_le16(0xffff);
7577 resp = hwrm_req_hold(bp, req);
7578 rc = hwrm_req_send_silent(bp, req);
d4f1420d 7579 if (rc)
be0dd9c4 7580 goto hwrm_func_resc_qcaps_exit;
be0dd9c4 7581
db4723b3
MC
7582 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7583 if (!all)
7584 goto hwrm_func_resc_qcaps_exit;
7585
be0dd9c4
MC
7586 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7587 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7588 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7589 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7590 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7591 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7592 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7593 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7594 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7595 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7596 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7597 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7598 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7599 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7600 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7601 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7602
9c1fabdf
MC
7603 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7604 u16 max_msix = le16_to_cpu(resp->max_msix);
7605
f7588cd8 7606 hw_resc->max_nqs = max_msix;
9c1fabdf
MC
7607 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7608 }
7609
4673d664
MC
7610 if (BNXT_PF(bp)) {
7611 struct bnxt_pf_info *pf = &bp->pf;
7612
7613 pf->vf_resv_strategy =
7614 le16_to_cpu(resp->vf_reservation_strategy);
bf82736d 7615 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
4673d664
MC
7616 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7617 }
be0dd9c4 7618hwrm_func_resc_qcaps_exit:
bbf33d1d 7619 hwrm_req_drop(bp, req);
be0dd9c4
MC
7620 return rc;
7621}
7622
ae5c42f0
MC
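/* Query the PTP configuration for the port and, if the hardware clock is
 * accessible, allocate bp->ptp_cfg, record the reference clock registers
 * and initialize the PTP clock.
 */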
7623static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7624{
bbf33d1d
EP
7625 struct hwrm_port_mac_ptp_qcfg_output *resp;
7626 struct hwrm_port_mac_ptp_qcfg_input *req;
ae5c42f0 7627 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
24ac1ecd 7628 bool phc_cfg;
ae5c42f0
MC
7629 u8 flags;
7630 int rc;
7631
7632 if (bp->hwrm_spec_code < 0x10801) {
7633 rc = -ENODEV;
7634 goto no_ptp;
7635 }
7636
bbf33d1d 7637 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
ae5c42f0
MC
7638 if (rc)
7639 goto no_ptp;
7640
bbf33d1d
EP
7641 req->port_id = cpu_to_le16(bp->pf.port_id);
7642 resp = hwrm_req_hold(bp, req);
7643 rc = hwrm_req_send(bp, req);
7644 if (rc)
7645 goto exit;
7646
ae5c42f0
MC
7647 flags = resp->flags;
7648 if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7649 rc = -ENODEV;
bbf33d1d 7650 goto exit;
ae5c42f0
MC
7651 }
7652 if (!ptp) {
7653 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
bbf33d1d
EP
7654 if (!ptp) {
7655 rc = -ENOMEM;
7656 goto exit;
7657 }
ae5c42f0
MC
7658 ptp->bp = bp;
7659 bp->ptp_cfg = ptp;
7660 }
7661 if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7662 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7663 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7664 } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7665 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7666 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7667 } else {
7668 rc = -ENODEV;
bbf33d1d 7669 goto exit;
ae5c42f0 7670 }
24ac1ecd
PC
7671 phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
7672 rc = bnxt_ptp_init(bp, phc_cfg);
bbf33d1d
EP
7673 if (rc)
7674 netdev_warn(bp->dev, "PTP initialization failed.\n");
7675exit:
7676 hwrm_req_drop(bp, req);
a521c8a0
MC
7677 if (!rc)
7678 return 0;
7679
ae5c42f0 7680no_ptp:
a521c8a0 7681 bnxt_ptp_clear(bp);
ae5c42f0
MC
7682 kfree(ptp);
7683 bp->ptp_cfg = NULL;
7684 return rc;
7685}
7686
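/* Query function capabilities: RoCE, PTP, WoL and other feature flags,
 * maximum ring/VNIC/stat-context resources, and the PF or VF MAC address.
 */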
be0dd9c4 7687static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
c0c050c5 7688{
bbf33d1d
EP
7689 struct hwrm_func_qcaps_output *resp;
7690 struct hwrm_func_qcaps_input *req;
6a4f2947 7691 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
66ed81dc 7692 u32 flags, flags_ext, flags_ext2;
bbf33d1d 7693 int rc;
c0c050c5 7694
bbf33d1d
EP
7695 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
7696 if (rc)
7697 return rc;
c0c050c5 7698
bbf33d1d
EP
7699 req->fid = cpu_to_le16(0xffff);
7700 resp = hwrm_req_hold(bp, req);
7701 rc = hwrm_req_send(bp, req);
c0c050c5
MC
7702 if (rc)
7703 goto hwrm_func_qcaps_exit;
7704
6a4f2947
MC
7705 flags = le32_to_cpu(resp->flags);
7706 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
e4060d30 7707 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6a4f2947 7708 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
e4060d30 7709 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
55e4398d
VV
7710 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7711 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
0a3f4e4f
VV
7712 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7713 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
6154532f
VV
7714 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7715 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
07f83d72
MC
7716 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7717 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
4037eb71
VV
7718 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7719 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
1da63ddd
EP
7720 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7721 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
80194db9
VV
7722 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
7723 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
1da63ddd
EP
7724
7725 flags_ext = le32_to_cpu(resp->flags_ext);
7726 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7727 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
caf3eedb
PC
7728 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
7729 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
24ac1ecd
PC
7730 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
7731 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
892a662f
EP
7732 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
7733 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
3c415339
EP
7734 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
7735 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
e4060d30 7736
66ed81dc
PC
7737 flags_ext2 = le32_to_cpu(resp->flags_ext2);
7738 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
7739 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
7740
7cc5a20e 7741 bp->tx_push_thresh = 0;
fed7edd1
MC
7742 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7743 BNXT_FW_MAJ(bp) > 217)
7cc5a20e
MC
7744 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7745
6a4f2947
MC
7746 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7747 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7748 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7749 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7750 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7751 if (!hw_resc->max_hw_ring_grps)
7752 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7753 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7754 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7755 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7756
c0c050c5
MC
7757 if (BNXT_PF(bp)) {
7758 struct bnxt_pf_info *pf = &bp->pf;
7759
7760 pf->fw_fid = le16_to_cpu(resp->fid);
7761 pf->port_id = le16_to_cpu(resp->port_id);
11f15ed3 7762 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
c0c050c5
MC
7763 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7764 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7765 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7766 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7767 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7768 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7769 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7770 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
ba642ab7 7771 bp->flags &= ~BNXT_FLAG_WOL_CAP;
6a4f2947 7772 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
c1ef146a 7773 bp->flags |= BNXT_FLAG_WOL_CAP;
de5bf194 7774 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
ae5c42f0 7775 __bnxt_hwrm_ptp_qcfg(bp);
de5bf194 7776 } else {
a521c8a0 7777 bnxt_ptp_clear(bp);
de5bf194
MC
7778 kfree(bp->ptp_cfg);
7779 bp->ptp_cfg = NULL;
7780 }
c0c050c5 7781 } else {
379a80a1 7782#ifdef CONFIG_BNXT_SRIOV
c0c050c5
MC
7783 struct bnxt_vf_info *vf = &bp->vf;
7784
7785 vf->fw_fid = le16_to_cpu(resp->fid);
7cc5a20e 7786 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
379a80a1 7787#endif
c0c050c5
MC
7788 }
7789
c0c050c5 7790hwrm_func_qcaps_exit:
bbf33d1d 7791 hwrm_req_drop(bp, req);
c0c050c5
MC
7792 return rc;
7793}
7794
80194db9
VV
7795static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
7796{
7797 struct hwrm_dbg_qcaps_output *resp;
7798 struct hwrm_dbg_qcaps_input *req;
7799 int rc;
7800
7801 bp->fw_dbg_cap = 0;
7802 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
7803 return;
7804
7805 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
7806 if (rc)
7807 return;
7808
7809 req->fid = cpu_to_le16(0xffff);
7810 resp = hwrm_req_hold(bp, req);
7811 rc = hwrm_req_send(bp, req);
7812 if (rc)
7813 goto hwrm_dbg_qcaps_exit;
7814
7815 bp->fw_dbg_cap = le32_to_cpu(resp->flags);
7816
7817hwrm_dbg_qcaps_exit:
7818 hwrm_req_drop(bp, req);
7819}
7820
804fba4e
MC
7821static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7822
c5b744d3 7823int bnxt_hwrm_func_qcaps(struct bnxt *bp)
be0dd9c4
MC
7824{
7825 int rc;
7826
7827 rc = __bnxt_hwrm_func_qcaps(bp);
7828 if (rc)
7829 return rc;
80194db9
VV
7830
7831 bnxt_hwrm_dbg_qcaps(bp);
7832
804fba4e
MC
7833 rc = bnxt_hwrm_queue_qportcfg(bp);
7834 if (rc) {
7835 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7836 return rc;
7837 }
be0dd9c4 7838 if (bp->hwrm_spec_code >= 0x10803) {
98f04cf0
MC
7839 rc = bnxt_alloc_ctx_mem(bp);
7840 if (rc)
7841 return rc;
db4723b3 7842 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
be0dd9c4 7843 if (!rc)
97381a18 7844 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
be0dd9c4
MC
7845 }
7846 return 0;
7847}
7848
e969ae5b
MC
7849static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7850{
e969ae5b 7851 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
bbf33d1d 7852 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
e969ae5b 7853 u32 flags;
bbf33d1d 7854 int rc;
e969ae5b
MC
7855
7856 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7857 return 0;
7858
bbf33d1d
EP
7859 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
7860 if (rc)
7861 return rc;
e969ae5b 7862
bbf33d1d
EP
7863 resp = hwrm_req_hold(bp, req);
7864 rc = hwrm_req_send(bp, req);
e969ae5b
MC
7865 if (rc)
7866 goto hwrm_cfa_adv_qcaps_exit;
7867
7868 flags = le32_to_cpu(resp->flags);
7869 if (flags &
41136ab3
MC
7870 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7871 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
e969ae5b
MC
7872
7873hwrm_cfa_adv_qcaps_exit:
bbf33d1d 7874 hwrm_req_drop(bp, req);
e969ae5b
MC
7875 return rc;
7876}
7877
3e9ec2bb
EP
7878static int __bnxt_alloc_fw_health(struct bnxt *bp)
7879{
7880 if (bp->fw_health)
7881 return 0;
7882
7883 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7884 if (!bp->fw_health)
7885 return -ENOMEM;
7886
8cc95ceb 7887 mutex_init(&bp->fw_health->lock);
3e9ec2bb
EP
7888 return 0;
7889}
7890
7891static int bnxt_alloc_fw_health(struct bnxt *bp)
7892{
7893 int rc;
7894
7895 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7896 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7897 return 0;
7898
7899 rc = __bnxt_alloc_fw_health(bp);
7900 if (rc) {
7901 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7902 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7903 return rc;
7904 }
7905
7906 return 0;
7907}
7908
ba02629f
EP
7909static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7910{
7911 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7912 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7913 BNXT_FW_HEALTH_WIN_MAP_OFF);
7914}
7915
43a440c4
MC
7916static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7917{
7918 struct bnxt_fw_health *fw_health = bp->fw_health;
7919 u32 reg_type;
7920
8cc95ceb 7921 if (!fw_health)
43a440c4
MC
7922 return;
7923
7924 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7925 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7926 fw_health->status_reliable = false;
8cc95ceb
EP
7927
7928 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
7929 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7930 fw_health->resets_reliable = false;
43a440c4
MC
7931}
7932
ba02629f
EP
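/* Locate the firmware status register, either through the hcomm status
 * structure or at a fixed GRC offset on P5 chips, and map it so that
 * firmware health can be monitored.
 */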
7933static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7934{
7935 void __iomem *hs;
7936 u32 status_loc;
7937 u32 reg_type;
7938 u32 sig;
7939
43a440c4
MC
7940 if (bp->fw_health)
7941 bp->fw_health->status_reliable = false;
7942
ba02629f
EP
7943 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7944 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7945
7946 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7947 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
d1cbd165
MC
7948 if (!bp->chip_num) {
7949 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7950 bp->chip_num = readl(bp->bar0 +
7951 BNXT_FW_HEALTH_WIN_BASE +
7952 BNXT_GRC_REG_CHIP_NUM);
7953 }
43a440c4 7954 if (!BNXT_CHIP_P5(bp))
d1cbd165 7955 return;
43a440c4 7956
d1cbd165
MC
7957 status_loc = BNXT_GRC_REG_STATUS_P5 |
7958 BNXT_FW_HEALTH_REG_TYPE_BAR0;
7959 } else {
7960 status_loc = readl(hs + offsetof(struct hcomm_status,
7961 fw_status_loc));
ba02629f
EP
7962 }
7963
7964 if (__bnxt_alloc_fw_health(bp)) {
7965 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7966 return;
7967 }
7968
ba02629f
EP
7969 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7970 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7971 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7972 __bnxt_map_fw_health_reg(bp, status_loc);
7973 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7974 BNXT_FW_HEALTH_WIN_OFF(status_loc);
7975 }
7976
7977 bp->fw_health->status_reliable = true;
7978}
7979
9ffbd677
MC
7980static int bnxt_map_fw_health_regs(struct bnxt *bp)
7981{
7982 struct bnxt_fw_health *fw_health = bp->fw_health;
7983 u32 reg_base = 0xffffffff;
7984 int i;
7985
43a440c4 7986 bp->fw_health->status_reliable = false;
8cc95ceb 7987 bp->fw_health->resets_reliable = false;
9ffbd677
MC
7988 /* Only pre-map the monitoring GRC registers using window 3 */
7989 for (i = 0; i < 4; i++) {
7990 u32 reg = fw_health->regs[i];
7991
7992 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7993 continue;
7994 if (reg_base == 0xffffffff)
7995 reg_base = reg & BNXT_GRC_BASE_MASK;
7996 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7997 return -ERANGE;
ba02629f 7998 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
9ffbd677 7999 }
43a440c4 8000 bp->fw_health->status_reliable = true;
8cc95ceb 8001 bp->fw_health->resets_reliable = true;
9ffbd677
MC
8002 if (reg_base == 0xffffffff)
8003 return 0;
8004
ba02629f 8005 __bnxt_map_fw_health_reg(bp, reg_base);
9ffbd677
MC
8006 return 0;
8007}
8008
0e0e3c53
KA
8009static void bnxt_remap_fw_health_regs(struct bnxt *bp)
8010{
8011 if (!bp->fw_health)
8012 return;
8013
8014 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
8015 bp->fw_health->status_reliable = true;
8016 bp->fw_health->resets_reliable = true;
8017 } else {
8018 bnxt_try_map_fw_health_reg(bp);
8019 }
8020}
8021
07f83d72
MC
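/* Fetch the firmware error recovery parameters: polling intervals, wait
 * times, the health/heartbeat/reset-count register locations and the
 * register write sequence used to reset the chip.
 */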
8022static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
8023{
07f83d72 8024 struct bnxt_fw_health *fw_health = bp->fw_health;
bbf33d1d
EP
8025 struct hwrm_error_recovery_qcfg_output *resp;
8026 struct hwrm_error_recovery_qcfg_input *req;
07f83d72
MC
8027 int rc, i;
8028
8029 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
8030 return 0;
8031
bbf33d1d
EP
8032 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
8033 if (rc)
8034 return rc;
8035
8036 resp = hwrm_req_hold(bp, req);
8037 rc = hwrm_req_send(bp, req);
07f83d72
MC
8038 if (rc)
8039 goto err_recovery_out;
07f83d72
MC
8040 fw_health->flags = le32_to_cpu(resp->flags);
8041 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
8042 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
8043 rc = -EINVAL;
8044 goto err_recovery_out;
8045 }
8046 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
8047 fw_health->master_func_wait_dsecs =
8048 le32_to_cpu(resp->master_func_wait_period);
8049 fw_health->normal_func_wait_dsecs =
8050 le32_to_cpu(resp->normal_func_wait_period);
8051 fw_health->post_reset_wait_dsecs =
8052 le32_to_cpu(resp->master_func_wait_period_after_reset);
8053 fw_health->post_reset_max_wait_dsecs =
8054 le32_to_cpu(resp->max_bailout_time_after_reset);
8055 fw_health->regs[BNXT_FW_HEALTH_REG] =
8056 le32_to_cpu(resp->fw_health_status_reg);
8057 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
8058 le32_to_cpu(resp->fw_heartbeat_reg);
8059 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
8060 le32_to_cpu(resp->fw_reset_cnt_reg);
8061 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
8062 le32_to_cpu(resp->reset_inprogress_reg);
8063 fw_health->fw_reset_inprog_reg_mask =
8064 le32_to_cpu(resp->reset_inprogress_reg_mask);
8065 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
8066 if (fw_health->fw_reset_seq_cnt >= 16) {
8067 rc = -EINVAL;
8068 goto err_recovery_out;
8069 }
8070 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
8071 fw_health->fw_reset_seq_regs[i] =
8072 le32_to_cpu(resp->reset_reg[i]);
8073 fw_health->fw_reset_seq_vals[i] =
8074 le32_to_cpu(resp->reset_reg_val[i]);
8075 fw_health->fw_reset_seq_delay_msec[i] =
8076 resp->delay_after_reset[i];
8077 }
8078err_recovery_out:
bbf33d1d 8079 hwrm_req_drop(bp, req);
9ffbd677
MC
8080 if (!rc)
8081 rc = bnxt_map_fw_health_regs(bp);
07f83d72
MC
8082 if (rc)
8083 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
8084 return rc;
8085}
8086
c0c050c5
MC
8087static int bnxt_hwrm_func_reset(struct bnxt *bp)
8088{
bbf33d1d
EP
8089 struct hwrm_func_reset_input *req;
8090 int rc;
c0c050c5 8091
bbf33d1d
EP
8092 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
8093 if (rc)
8094 return rc;
c0c050c5 8095
bbf33d1d
EP
8096 req->enables = 0;
8097 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
8098 return hwrm_req_send(bp, req);
c0c050c5
MC
8099}
8100
4933f675
VV
8101static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
8102{
8103 struct hwrm_nvm_get_dev_info_output nvm_info;
8104
8105 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
8106 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
8107 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
8108 nvm_info.nvm_cfg_ver_upd);
8109}
8110
c0c050c5
MC
8111static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
8112{
bbf33d1d
EP
8113 struct hwrm_queue_qportcfg_output *resp;
8114 struct hwrm_queue_qportcfg_input *req;
aabfc016
MC
8115 u8 i, j, *qptr;
8116 bool no_rdma;
bbf33d1d 8117 int rc = 0;
c0c050c5 8118
bbf33d1d
EP
8119 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
8120 if (rc)
8121 return rc;
c0c050c5 8122
bbf33d1d
EP
8123 resp = hwrm_req_hold(bp, req);
8124 rc = hwrm_req_send(bp, req);
c0c050c5
MC
8125 if (rc)
8126 goto qportcfg_exit;
8127
8128 if (!resp->max_configurable_queues) {
8129 rc = -EINVAL;
8130 goto qportcfg_exit;
8131 }
8132 bp->max_tc = resp->max_configurable_queues;
87c374de 8133 bp->max_lltc = resp->max_configurable_lossless_queues;
c0c050c5
MC
8134 if (bp->max_tc > BNXT_MAX_QUEUE)
8135 bp->max_tc = BNXT_MAX_QUEUE;
8136
aabfc016
MC
8137 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
8138 qptr = &resp->queue_id0;
8139 for (i = 0, j = 0; i < bp->max_tc; i++) {
98f04cf0
MC
8140 bp->q_info[j].queue_id = *qptr;
8141 bp->q_ids[i] = *qptr++;
aabfc016
MC
8142 bp->q_info[j].queue_profile = *qptr++;
8143 bp->tc_to_qidx[j] = j;
8144 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
8145 (no_rdma && BNXT_PF(bp)))
8146 j++;
8147 }
98f04cf0 8148 bp->max_q = bp->max_tc;
aabfc016
MC
8149 bp->max_tc = max_t(u8, j, 1);
8150
441cabbb
MC
8151 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
8152 bp->max_tc = 1;
8153
87c374de
MC
8154 if (bp->max_lltc > bp->max_tc)
8155 bp->max_lltc = bp->max_tc;
8156
c0c050c5 8157qportcfg_exit:
bbf33d1d 8158 hwrm_req_drop(bp, req);
c0c050c5
MC
8159 return rc;
8160}
8161
7b370ad7 8162static int bnxt_hwrm_poll(struct bnxt *bp)
c0c050c5 8163{
bbf33d1d 8164 struct hwrm_ver_get_input *req;
ba642ab7 8165 int rc;
c0c050c5 8166
bbf33d1d
EP
8167 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
8168 if (rc)
8169 return rc;
ba642ab7 8170
bbf33d1d
EP
8171 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
8172 req->hwrm_intf_min = HWRM_VERSION_MINOR;
8173 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
8174
8175 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
8176 rc = hwrm_req_send(bp, req);
ba642ab7
MC
8177 return rc;
8178}
8179
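/* Query firmware and HWRM interface versions, build the version strings,
 * pick up command timeouts and maximum request lengths, and detect optional
 * capabilities advertised in dev_caps_cfg.
 */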
8180static int bnxt_hwrm_ver_get(struct bnxt *bp)
8181{
bbf33d1d
EP
8182 struct hwrm_ver_get_output *resp;
8183 struct hwrm_ver_get_input *req;
d0ad2ea2 8184 u16 fw_maj, fw_min, fw_bld, fw_rsv;
b7a444f0 8185 u32 dev_caps_cfg, hwrm_ver;
d0ad2ea2 8186 int rc, len;
ba642ab7 8187
bbf33d1d
EP
8188 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
8189 if (rc)
8190 return rc;
8191
8192 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
ba642ab7 8193 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
bbf33d1d
EP
8194 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
8195 req->hwrm_intf_min = HWRM_VERSION_MINOR;
8196 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7b370ad7 8197
bbf33d1d
EP
8198 resp = hwrm_req_hold(bp, req);
8199 rc = hwrm_req_send(bp, req);
c0c050c5
MC
8200 if (rc)
8201 goto hwrm_ver_get_exit;
8202
8203 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
8204
894aa69a
MC
8205 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
8206 resp->hwrm_intf_min_8b << 8 |
8207 resp->hwrm_intf_upd_8b;
8208 if (resp->hwrm_intf_maj_8b < 1) {
c193554e 8209 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
894aa69a
MC
8210 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8211 resp->hwrm_intf_upd_8b);
c193554e 8212 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
c0c050c5 8213 }
b7a444f0
VV
8214
8215 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
8216 HWRM_VERSION_UPDATE;
8217
8218 if (bp->hwrm_spec_code > hwrm_ver)
8219 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8220 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
8221 HWRM_VERSION_UPDATE);
8222 else
8223 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8224 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8225 resp->hwrm_intf_upd_8b);
8226
d0ad2ea2
MC
8227 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
8228 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
8229 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
8230 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
8231 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
8232 len = FW_VER_STR_LEN;
8233 } else {
8234 fw_maj = resp->hwrm_fw_maj_8b;
8235 fw_min = resp->hwrm_fw_min_8b;
8236 fw_bld = resp->hwrm_fw_bld_8b;
8237 fw_rsv = resp->hwrm_fw_rsvd_8b;
8238 len = BC_HWRM_STR_LEN;
8239 }
8240 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
8241 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
8242 fw_rsv);
c0c050c5 8243
691aa620
VV
8244 if (strlen(resp->active_pkg_name)) {
8245 int fw_ver_len = strlen(bp->fw_ver_str);
8246
8247 snprintf(bp->fw_ver_str + fw_ver_len,
8248 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
8249 resp->active_pkg_name);
8250 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
8251 }
8252
ff4fe81d
MC
8253 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
8254 if (!bp->hwrm_cmd_timeout)
8255 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
bce9a0b7
EP
8256 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
8257 if (!bp->hwrm_cmd_max_timeout)
8258 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
8259 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
8260 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
8261 bp->hwrm_cmd_max_timeout / 1000);
ff4fe81d 8262
1dfddc41 8263 if (resp->hwrm_intf_maj_8b >= 1) {
e6ef2699 8264 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
1dfddc41
MC
8265 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
8266 }
8267 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
8268 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
e6ef2699 8269
659c805c 8270 bp->chip_num = le16_to_cpu(resp->chip_num);
5313845f 8271 bp->chip_rev = resp->chip_rev;
3e8060fa
PS
8272 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
8273 !resp->chip_metal)
8274 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
659c805c 8275
e605db80
DK
8276 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
8277 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8278 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
97381a18 8279 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
e605db80 8280
760b6d33
VD
8281 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8282 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8283
abd43a13
VD
8284 if (dev_caps_cfg &
8285 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8286 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8287
2a516444
MC
8288 if (dev_caps_cfg &
8289 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8290 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8291
e969ae5b
MC
8292 if (dev_caps_cfg &
8293 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8294 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8295
c0c050c5 8296hwrm_ver_get_exit:
bbf33d1d 8297 hwrm_req_drop(bp, req);
c0c050c5
MC
8298 return rc;
8299}
8300
5ac67d8b
RS
8301int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8302{
bbf33d1d 8303 struct hwrm_fw_set_time_input *req;
7dfaa7bc
AB
8304 struct tm tm;
8305 time64_t now = ktime_get_real_seconds();
bbf33d1d 8306 int rc;
5ac67d8b 8307
ca2c39e2
MC
8308 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8309 bp->hwrm_spec_code < 0x10400)
5ac67d8b
RS
8310 return -EOPNOTSUPP;
8311
7dfaa7bc 8312 time64_to_tm(now, 0, &tm);
bbf33d1d
EP
8313 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
8314 if (rc)
8315 return rc;
8316
8317 req->year = cpu_to_le16(1900 + tm.tm_year);
8318 req->month = 1 + tm.tm_mon;
8319 req->day = tm.tm_mday;
8320 req->hour = tm.tm_hour;
8321 req->minute = tm.tm_min;
8322 req->second = tm.tm_sec;
8323 return hwrm_req_send(bp, req);
5ac67d8b
RS
8324}
8325
fea6b333
MC
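/* Fold a hardware counter of limited width (given by @mask) into the full
 * 64-bit software counter.  If the masked hardware value is smaller than
 * the last value seen, the counter has wrapped and one full period
 * (mask + 1) is added.
 */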
8326static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8327{
8328 u64 sw_tmp;
8329
fa97f303 8330 hw &= mask;
fea6b333
MC
8331 sw_tmp = (*sw & ~mask) | hw;
8332 if (hw < (*sw & mask))
8333 sw_tmp += mask + 1;
8334 WRITE_ONCE(*sw, sw_tmp);
8335}
8336
8337static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8338 int count, bool ignore_zero)
8339{
8340 int i;
8341
8342 for (i = 0; i < count; i++) {
8343 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8344
8345 if (ignore_zero && !hw)
8346 continue;
8347
8348 if (masks[i] == -1ULL)
8349 sw_stats[i] = hw;
8350 else
8351 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8352 }
8353}
8354
8355static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8356{
8357 if (!stats->hw_stats)
8358 return;
8359
8360 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8361 stats->hw_masks, stats->len / 8, false);
8362}
8363
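/* Accumulate the hardware counters of every completion ring and, when
 * enabled, the port statistics into their 64-bit software mirrors.  On P5
 * chips zero samples are skipped to work around a counter bug.
 */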
8364static void bnxt_accumulate_all_stats(struct bnxt *bp)
8365{
8366 struct bnxt_stats_mem *ring0_stats;
8367 bool ignore_zero = false;
8368 int i;
8369
8370 /* Chip bug. Counter intermittently becomes 0. */
8371 if (bp->flags & BNXT_FLAG_CHIP_P5)
8372 ignore_zero = true;
8373
8374 for (i = 0; i < bp->cp_nr_rings; i++) {
8375 struct bnxt_napi *bnapi = bp->bnapi[i];
8376 struct bnxt_cp_ring_info *cpr;
8377 struct bnxt_stats_mem *stats;
8378
8379 cpr = &bnapi->cp_ring;
8380 stats = &cpr->stats;
8381 if (!i)
8382 ring0_stats = stats;
8383 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8384 ring0_stats->hw_masks,
8385 ring0_stats->len / 8, ignore_zero);
8386 }
8387 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8388 struct bnxt_stats_mem *stats = &bp->port_stats;
8389 __le64 *hw_stats = stats->hw_stats;
8390 u64 *sw_stats = stats->sw_stats;
8391 u64 *masks = stats->hw_masks;
8392 int cnt;
8393
8394 cnt = sizeof(struct rx_port_stats) / 8;
8395 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8396
8397 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8398 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8399 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8400 cnt = sizeof(struct tx_port_stats) / 8;
8401 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8402 }
8403 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8404 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8405 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8406 }
8407}
8408
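/* Request a firmware DMA of the legacy RX/TX port statistics into
 * bp->port_stats.  A non-zero @flags value (e.g. a counter-mask query)
 * requires extended HW stats support in the firmware.
 */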
531d1d26 8409static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
3bdf56c4 8410{
bbf33d1d 8411 struct hwrm_port_qstats_input *req;
3bdf56c4 8412 struct bnxt_pf_info *pf = &bp->pf;
bbf33d1d 8413 int rc;
3bdf56c4
MC
8414
8415 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8416 return 0;
8417
531d1d26
MC
8418 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8419 return -EOPNOTSUPP;
8420
bbf33d1d
EP
8421 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
8422 if (rc)
8423 return rc;
8424
8425 req->flags = flags;
8426 req->port_id = cpu_to_le16(pf->port_id);
8427 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
177a6cde 8428 BNXT_TX_PORT_STATS_BYTE_OFFSET);
bbf33d1d
EP
8429 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8430 return hwrm_req_send(bp, req);
3bdf56c4
MC
8431}
8432
531d1d26 8433static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
00db3cba 8434{
bbf33d1d
EP
8435 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
8436 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
8437 struct hwrm_port_qstats_ext_output *resp_qs;
8438 struct hwrm_port_qstats_ext_input *req_qs;
00db3cba 8439 struct bnxt_pf_info *pf = &bp->pf;
ad361adf 8440 u32 tx_stat_size;
36e53349 8441 int rc;
00db3cba
VV
8442
8443 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8444 return 0;
8445
531d1d26
MC
8446 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8447 return -EOPNOTSUPP;
8448
bbf33d1d
EP
8449 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
8450 if (rc)
8451 return rc;
8452
8453 req_qs->flags = flags;
8454 req_qs->port_id = cpu_to_le16(pf->port_id);
8455 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8456 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
177a6cde
MC
8457 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8458 sizeof(struct tx_port_stats_ext) : 0;
bbf33d1d
EP
8459 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
8460 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8461 resp_qs = hwrm_req_hold(bp, req_qs);
8462 rc = hwrm_req_send(bp, req_qs);
36e53349 8463 if (!rc) {
bbf33d1d
EP
8464 bp->fw_rx_stats_ext_size =
8465 le16_to_cpu(resp_qs->rx_stat_size) / 8;
21e70778
MC
8466 if (BNXT_FW_MAJ(bp) < 220 &&
8467 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
8468 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
8469
ad361adf 8470 bp->fw_tx_stats_ext_size = tx_stat_size ?
bbf33d1d 8471 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
36e53349
MC
8472 } else {
8473 bp->fw_rx_stats_ext_size = 0;
8474 bp->fw_tx_stats_ext_size = 0;
8475 }
bbf33d1d
EP
8476 hwrm_req_drop(bp, req_qs);
8477
531d1d26 8478 if (flags)
bbf33d1d 8479 return rc;
531d1d26 8480
e37fed79
MC
8481 if (bp->fw_tx_stats_ext_size <=
8482 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
e37fed79
MC
8483 bp->pri2cos_valid = 0;
8484 return rc;
8485 }
8486
bbf33d1d
EP
8487 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
8488 if (rc)
8489 return rc;
e37fed79 8490
bbf33d1d
EP
8491 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8492
8493 resp_qc = hwrm_req_hold(bp, req_qc);
8494 rc = hwrm_req_send(bp, req_qc);
e37fed79 8495 if (!rc) {
e37fed79
MC
8496 u8 *pri2cos;
8497 int i, j;
8498
bbf33d1d 8499 pri2cos = &resp_qc->pri0_cos_queue_id;
e37fed79
MC
8500 for (i = 0; i < 8; i++) {
8501 u8 queue_id = pri2cos[i];
a24ec322 8502 u8 queue_idx;
e37fed79 8503
a24ec322
MC
8504 /* Per port queue IDs start from 0, 10, 20, etc */
8505 queue_idx = queue_id % 10;
8506 if (queue_idx > BNXT_MAX_QUEUE) {
8507 bp->pri2cos_valid = false;
bbf33d1d
EP
8508 hwrm_req_drop(bp, req_qc);
8509 return rc;
a24ec322 8510 }
e37fed79
MC
8511 for (j = 0; j < bp->max_q; j++) {
8512 if (bp->q_ids[j] == queue_id)
a24ec322 8513 bp->pri2cos_idx[i] = queue_idx;
e37fed79
MC
8514 }
8515 }
bbf33d1d 8516 bp->pri2cos_valid = true;
e37fed79 8517 }
bbf33d1d
EP
8518 hwrm_req_drop(bp, req_qc);
8519
36e53349 8520 return rc;
00db3cba
VV
8521}
8522
c0c050c5
MC
8523static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8524{
7ae9dc35
MC
8525 bnxt_hwrm_tunnel_dst_port_free(bp,
8526 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8527 bnxt_hwrm_tunnel_dst_port_free(bp,
8528 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
c0c050c5
MC
8529}
8530
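/* Enable or disable TPA (hardware receive aggregation) on all VNICs.
 * When disabling, skip the firmware calls if firmware access is
 * currently not possible.
 */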
8531static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8532{
8533 int rc, i;
8534 u32 tpa_flags = 0;
8535
8536 if (set_tpa)
8537 tpa_flags = bp->flags & BNXT_FLAG_TPA;
b340dc68 8538 else if (BNXT_NO_FW_ACCESS(bp))
b4fff207 8539 return 0;
c0c050c5
MC
8540 for (i = 0; i < bp->nr_vnics; i++) {
8541 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8542 if (rc) {
8543 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
23e12c89 8544 i, rc);
c0c050c5
MC
8545 return rc;
8546 }
8547 }
8548 return 0;
8549}
8550
8551static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8552{
8553 int i;
8554
8555 for (i = 0; i < bp->nr_vnics; i++)
8556 bnxt_hwrm_vnic_set_rss(bp, i, false);
8557}
8558
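/* Undo all VNIC configuration in the firmware: L2 filters, RSS
 * settings and contexts, TPA, and finally the VNICs themselves.
 * P5 chips free their RSS contexts after the VNICs, older chips
 * before.
 */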
a46ecb11 8559static void bnxt_clear_vnic(struct bnxt *bp)
c0c050c5 8560{
a46ecb11
MC
8561 if (!bp->vnic_info)
8562 return;
8563
8564 bnxt_hwrm_clear_vnic_filter(bp);
8565 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
c0c050c5
MC
8566 /* clear all RSS settings before freeing the vnic ctx */
8567 bnxt_hwrm_clear_vnic_rss(bp);
8568 bnxt_hwrm_vnic_ctx_free(bp);
c0c050c5 8569 }
a46ecb11
MC
8570 /* before freeing the vnic, undo its TPA settings */
8571 if (bp->flags & BNXT_FLAG_TPA)
8572 bnxt_set_tpa(bp, false);
8573 bnxt_hwrm_vnic_free(bp);
8574 if (bp->flags & BNXT_FLAG_CHIP_P5)
8575 bnxt_hwrm_vnic_ctx_free(bp);
8576}
8577
8578static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8579 bool irq_re_init)
8580{
8581 bnxt_clear_vnic(bp);
c0c050c5
MC
8582 bnxt_hwrm_ring_free(bp, close_path);
8583 bnxt_hwrm_ring_grp_free(bp);
8584 if (irq_re_init) {
8585 bnxt_hwrm_stat_ctx_free(bp);
8586 bnxt_hwrm_free_tunnel_ports(bp);
8587 }
8588}
8589
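/* Set the function's bridge mode (VEB or VEPA) through HWRM_FUNC_CFG;
 * any other bridge mode is rejected with -EINVAL.
 */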
39d8ba2e
MC
8590static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8591{
bbf33d1d
EP
8592 struct hwrm_func_cfg_input *req;
8593 u8 evb_mode;
8594 int rc;
39d8ba2e 8595
39d8ba2e 8596 if (br_mode == BRIDGE_MODE_VEB)
bbf33d1d 8597 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
39d8ba2e 8598 else if (br_mode == BRIDGE_MODE_VEPA)
bbf33d1d 8599 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
39d8ba2e
MC
8600 else
8601 return -EINVAL;
bbf33d1d
EP
8602
8603 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8604 if (rc)
8605 return rc;
8606
8607 req->fid = cpu_to_le16(0xffff);
8608 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8609 req->evb_mode = evb_mode;
8610 return hwrm_req_send(bp, req);
39d8ba2e
MC
8611}
8612
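/* Report the host cache line size (64 or 128 bytes) to the firmware
 * via HWRM_FUNC_CFG.  PF only, and only on HWRM spec 0x10803 or newer.
 */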
c3480a60
MC
8613static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8614{
bbf33d1d
EP
8615 struct hwrm_func_cfg_input *req;
8616 int rc;
c3480a60
MC
8617
8618 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8619 return 0;
8620
bbf33d1d
EP
8621 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8622 if (rc)
8623 return rc;
8624
8625 req->fid = cpu_to_le16(0xffff);
8626 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8627 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
c3480a60 8628 if (size == 128)
bbf33d1d 8629 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
c3480a60 8630
bbf33d1d 8631 return hwrm_req_send(bp, req);
c3480a60
MC
8632}
8633
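/* Bring up one VNIC on pre-P5 chips: allocate its RSS (and Nitro A0
 * COS) contexts, configure the VNIC and its ring group, program RSS,
 * and enable header-data split when aggregation rings are in use.
 */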
7b3af4f7 8634static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
c0c050c5 8635{
ae10ae74 8636 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
c0c050c5
MC
8637 int rc;
8638
ae10ae74
MC
8639 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8640 goto skip_rss_ctx;
8641
c0c050c5 8642 /* allocate context for vnic */
94ce9caa 8643 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
c0c050c5
MC
8644 if (rc) {
8645 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8646 vnic_id, rc);
8647 goto vnic_setup_err;
8648 }
8649 bp->rsscos_nr_ctxs++;
8650
94ce9caa
PS
8651 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8652 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8653 if (rc) {
8654 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8655 vnic_id, rc);
8656 goto vnic_setup_err;
8657 }
8658 bp->rsscos_nr_ctxs++;
8659 }
8660
ae10ae74 8661skip_rss_ctx:
c0c050c5
MC
8662 /* configure default vnic, ring grp */
8663 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8664 if (rc) {
8665 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8666 vnic_id, rc);
8667 goto vnic_setup_err;
8668 }
8669
8670 /* Enable RSS hashing on vnic */
8671 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8672 if (rc) {
8673 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8674 vnic_id, rc);
8675 goto vnic_setup_err;
8676 }
8677
8678 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8679 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8680 if (rc) {
8681 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8682 vnic_id, rc);
8683 }
8684 }
8685
8686vnic_setup_err:
8687 return rc;
8688}
8689
7b3af4f7
MC
8690static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8691{
8692 int rc, i, nr_ctxs;
8693
f9f6a3fb 8694 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
7b3af4f7
MC
8695 for (i = 0; i < nr_ctxs; i++) {
8696 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8697 if (rc) {
8698 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8699 vnic_id, i, rc);
8700 break;
8701 }
8702 bp->rsscos_nr_ctxs++;
8703 }
8704 if (i < nr_ctxs)
8705 return -ENOMEM;
8706
8707 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8708 if (rc) {
8709 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8710 vnic_id, rc);
8711 return rc;
8712 }
8713 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8714 if (rc) {
8715 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8716 vnic_id, rc);
8717 return rc;
8718 }
8719 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8720 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8721 if (rc) {
8722 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8723 vnic_id, rc);
8724 }
8725 }
8726 return rc;
8727}
8728
8729static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8730{
8731 if (bp->flags & BNXT_FLAG_CHIP_P5)
8732 return __bnxt_setup_vnic_p5(bp, vnic_id);
8733 else
8734 return __bnxt_setup_vnic(bp, vnic_id);
8735}
8736
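/* For accelerated RFS on pre-P5 chips, allocate one extra VNIC per RX
 * ring (VNIC IDs 1..n) so ntuple filters can steer flows to individual
 * rings.  P5 chips do not need these and return early.
 */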
c0c050c5
MC
8737static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8738{
8739#ifdef CONFIG_RFS_ACCEL
8740 int i, rc = 0;
8741
9b3d15e6
MC
8742 if (bp->flags & BNXT_FLAG_CHIP_P5)
8743 return 0;
8744
c0c050c5 8745 for (i = 0; i < bp->rx_nr_rings; i++) {
ae10ae74 8746 struct bnxt_vnic_info *vnic;
c0c050c5
MC
8747 u16 vnic_id = i + 1;
8748 u16 ring_id = i;
8749
8750 if (vnic_id >= bp->nr_vnics)
8751 break;
8752
ae10ae74
MC
8753 vnic = &bp->vnic_info[vnic_id];
8754 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8755 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8756 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
b81a90d3 8757 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
c0c050c5
MC
8758 if (rc) {
8759 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8760 vnic_id, rc);
8761 break;
8762 }
8763 rc = bnxt_setup_vnic(bp, vnic_id);
8764 if (rc)
8765 break;
8766 }
8767 return rc;
8768#else
8769 return 0;
8770#endif
8771}
8772
dd85fc0a 8773/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
17c71ac3
MC
8774static bool bnxt_promisc_ok(struct bnxt *bp)
8775{
8776#ifdef CONFIG_BNXT_SRIOV
dd85fc0a 8777 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
17c71ac3
MC
8778 return false;
8779#endif
8780 return true;
8781}
8782
dc52c6c7
PS
8783static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8784{
8785 int rc = 0;
8786
8787 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8788 if (rc) {
8789 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8790 rc);
8791 return rc;
8792 }
8793
8794 rc = bnxt_hwrm_vnic_cfg(bp, 1);
8795 if (rc) {
8796 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
8797 rc);
8798 return rc;
8799 }
8800 return rc;
8801}
8802
b664f008 8803static int bnxt_cfg_rx_mode(struct bnxt *);
7d2837dd 8804static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
b664f008 8805
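/* Firmware-side bring-up done during open: allocate stat contexts,
 * rings and ring groups, set up the default VNIC 0 (plus RFS VNICs and
 * TPA when enabled), program the default MAC filter and RX mode, and
 * set interrupt coalescing.  On failure, everything allocated here is
 * freed again via bnxt_hwrm_resource_free().
 */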
c0c050c5
MC
8806static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8807{
7d2837dd 8808 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
c0c050c5 8809 int rc = 0;
76595193 8810 unsigned int rx_nr_rings = bp->rx_nr_rings;
c0c050c5
MC
8811
8812 if (irq_re_init) {
8813 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8814 if (rc) {
8815 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8816 rc);
8817 goto err_out;
8818 }
8819 }
8820
8821 rc = bnxt_hwrm_ring_alloc(bp);
8822 if (rc) {
8823 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8824 goto err_out;
8825 }
8826
8827 rc = bnxt_hwrm_ring_grp_alloc(bp);
8828 if (rc) {
8829 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8830 goto err_out;
8831 }
8832
76595193
PS
8833 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8834 rx_nr_rings--;
8835
c0c050c5 8836 /* default vnic 0 */
76595193 8837 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
c0c050c5
MC
8838 if (rc) {
8839 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8840 goto err_out;
8841 }
8842
8843 rc = bnxt_setup_vnic(bp, 0);
8844 if (rc)
8845 goto err_out;
98a4322b
EP
8846 if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
8847 bnxt_hwrm_update_rss_hash_cfg(bp);
c0c050c5
MC
8848
8849 if (bp->flags & BNXT_FLAG_RFS) {
8850 rc = bnxt_alloc_rfs_vnics(bp);
8851 if (rc)
8852 goto err_out;
8853 }
8854
8855 if (bp->flags & BNXT_FLAG_TPA) {
8856 rc = bnxt_set_tpa(bp, true);
8857 if (rc)
8858 goto err_out;
8859 }
8860
8861 if (BNXT_VF(bp))
8862 bnxt_update_vf_mac(bp);
8863
8864 /* Filter for default vnic 0 */
8865 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8866 if (rc) {
662c9b22
EP
8867 if (BNXT_VF(bp) && rc == -ENODEV)
8868 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
8869 else
8870 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
c0c050c5
MC
8871 goto err_out;
8872 }
7d2837dd 8873 vnic->uc_filter_count = 1;
c0c050c5 8874
30e33848 8875 vnic->rx_mask = 0;
cfcab3b3
MC
8876 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
8877 goto skip_rx_mask;
8878
30e33848
MC
8879 if (bp->dev->flags & IFF_BROADCAST)
8880 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5 8881
dd85fc0a 8882 if (bp->dev->flags & IFF_PROMISC)
7d2837dd
MC
8883 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8884
8885 if (bp->dev->flags & IFF_ALLMULTI) {
8886 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8887 vnic->mc_list_count = 0;
8cdb1592 8888 } else if (bp->dev->flags & IFF_MULTICAST) {
7d2837dd
MC
8889 u32 mask = 0;
8890
8891 bnxt_mc_list_updated(bp, &mask);
8892 vnic->rx_mask |= mask;
8893 }
c0c050c5 8894
b664f008
MC
8895 rc = bnxt_cfg_rx_mode(bp);
8896 if (rc)
c0c050c5 8897 goto err_out;
c0c050c5 8898
cfcab3b3 8899skip_rx_mask:
c0c050c5
MC
8900 rc = bnxt_hwrm_set_coal(bp);
8901 if (rc)
8902 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
dc52c6c7
PS
8903 rc);
8904
8905 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8906 rc = bnxt_setup_nitroa0_vnic(bp);
8907 if (rc)
8908 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8909 rc);
8910 }
c0c050c5 8911
cf6645f8
MC
8912 if (BNXT_VF(bp)) {
8913 bnxt_hwrm_func_qcfg(bp);
8914 netdev_update_features(bp->dev);
8915 }
8916
c0c050c5
MC
8917 return 0;
8918
8919err_out:
8920 bnxt_hwrm_resource_free(bp, 0, true);
8921
8922 return rc;
8923}
8924
8925static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8926{
8927 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8928 return 0;
8929}
8930
8931static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8932{
2247925f 8933 bnxt_init_cp_rings(bp);
c0c050c5
MC
8934 bnxt_init_rx_rings(bp);
8935 bnxt_init_tx_rings(bp);
8936 bnxt_init_ring_grps(bp, irq_re_init);
8937 bnxt_init_vnics(bp);
8938
8939 return bnxt_init_chip(bp, irq_re_init);
8940}
8941
c0c050c5
MC
8942static int bnxt_set_real_num_queues(struct bnxt *bp)
8943{
8944 int rc;
8945 struct net_device *dev = bp->dev;
8946
5f449249
MC
8947 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8948 bp->tx_nr_rings_xdp);
c0c050c5
MC
8949 if (rc)
8950 return rc;
8951
8952 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8953 if (rc)
8954 return rc;
8955
8956#ifdef CONFIG_RFS_ACCEL
45019a18 8957 if (bp->flags & BNXT_FLAG_RFS)
c0c050c5 8958 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
c0c050c5
MC
8959#endif
8960
8961 return rc;
8962}
8963
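/* Fit the requested RX and TX ring counts into @max completion rings.
 * With shared completion rings both counts are simply capped at @max;
 * otherwise the larger count is reduced first until rx + tx <= max.
 */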
6e6c5a57
MC
8964static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8965 bool shared)
8966{
8967 int _rx = *rx, _tx = *tx;
8968
8969 if (shared) {
8970 *rx = min_t(int, _rx, max);
8971 *tx = min_t(int, _tx, max);
8972 } else {
8973 if (max < 2)
8974 return -ENOMEM;
8975
8976 while (_rx + _tx > max) {
8977 if (_rx > _tx && _rx > 1)
8978 _rx--;
8979 else if (_tx > 1)
8980 _tx--;
8981 }
8982 *rx = _rx;
8983 *tx = _tx;
8984 }
8985 return 0;
8986}
8987
7809592d
MC
8988static void bnxt_setup_msix(struct bnxt *bp)
8989{
8990 const int len = sizeof(bp->irq_tbl[0].name);
8991 struct net_device *dev = bp->dev;
8992 int tcs, i;
8993
8994 tcs = netdev_get_num_tc(dev);
18e4960c 8995 if (tcs) {
d1e7925e 8996 int i, off, count;
7809592d 8997
d1e7925e
MC
8998 for (i = 0; i < tcs; i++) {
8999 count = bp->tx_nr_rings_per_tc;
9000 off = i * count;
9001 netdev_set_tc_queue(dev, i, count, off);
7809592d
MC
9002 }
9003 }
9004
9005 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c 9006 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7809592d
MC
9007 char *attr;
9008
9009 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
9010 attr = "TxRx";
9011 else if (i < bp->rx_nr_rings)
9012 attr = "rx";
9013 else
9014 attr = "tx";
9015
e5811b8c
MC
9016 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
9017 attr, i);
9018 bp->irq_tbl[map_idx].handler = bnxt_msix;
7809592d
MC
9019 }
9020}
9021
9022static void bnxt_setup_inta(struct bnxt *bp)
9023{
9024 const int len = sizeof(bp->irq_tbl[0].name);
9025
9026 if (netdev_get_num_tc(bp->dev))
9027 netdev_reset_tc(bp->dev);
9028
9029 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
9030 0);
9031 bp->irq_tbl[0].handler = bnxt_inta;
9032}
9033
20d7d1c5
EP
9034static int bnxt_init_int_mode(struct bnxt *bp);
9035
7809592d
MC
9036static int bnxt_setup_int_mode(struct bnxt *bp)
9037{
9038 int rc;
9039
20d7d1c5
EP
9040 if (!bp->irq_tbl) {
9041 rc = bnxt_init_int_mode(bp);
9042 if (rc || !bp->irq_tbl)
9043 return rc ?: -ENODEV;
9044 }
9045
7809592d
MC
9046 if (bp->flags & BNXT_FLAG_USING_MSIX)
9047 bnxt_setup_msix(bp);
9048 else
9049 bnxt_setup_inta(bp);
9050
9051 rc = bnxt_set_real_num_queues(bp);
9052 return rc;
9053}
9054
b7429954 9055#ifdef CONFIG_RFS_ACCEL
8079e8f1
MC
9056static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
9057{
6a4f2947 9058 return bp->hw_resc.max_rsscos_ctxs;
8079e8f1
MC
9059}
9060
9061static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
9062{
6a4f2947 9063 return bp->hw_resc.max_vnics;
8079e8f1 9064}
b7429954 9065#endif
8079e8f1 9066
e4060d30
MC
9067unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
9068{
6a4f2947 9069 return bp->hw_resc.max_stat_ctxs;
e4060d30
MC
9070}
9071
9072unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
9073{
6a4f2947 9074 return bp->hw_resc.max_cp_rings;
e4060d30
MC
9075}
9076
e916b081 9077static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
a588e458 9078{
c0b8cda0
MC
9079 unsigned int cp = bp->hw_resc.max_cp_rings;
9080
9081 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9082 cp -= bnxt_get_ulp_msix_num(bp);
9083
9084 return cp;
a588e458
MC
9085}
9086
ad95c27b 9087static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7809592d 9088{
6a4f2947
MC
9089 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9090
f7588cd8
MC
9091 if (bp->flags & BNXT_FLAG_CHIP_P5)
9092 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
9093
6a4f2947 9094 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7809592d
MC
9095}
9096
30f52947 9097static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
33c2657e 9098{
6a4f2947 9099 bp->hw_resc.max_irqs = max_irqs;
33c2657e
MC
9100}
9101
e916b081
MC
9102unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
9103{
9104 unsigned int cp;
9105
9106 cp = bnxt_get_max_func_cp_rings_for_en(bp);
9107 if (bp->flags & BNXT_FLAG_CHIP_P5)
9108 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
9109 else
9110 return cp - bp->cp_nr_rings;
9111}
9112
c027c6b4
VV
9113unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
9114{
d77b1ad8 9115 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
c027c6b4
VV
9116}
9117
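/* Return how many MSI-X vectors can still be made available to an
 * upper-layer (ULP) driver beyond what the L2 rings are using, bounded
 * by the function's interrupt and completion-ring limits.
 */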
fbcfc8e4
MC
9118int bnxt_get_avail_msix(struct bnxt *bp, int num)
9119{
9120 int max_cp = bnxt_get_max_func_cp_rings(bp);
9121 int max_irq = bnxt_get_max_func_irqs(bp);
9122 int total_req = bp->cp_nr_rings + num;
9123 int max_idx, avail_msix;
9124
75720e63
MC
9125 max_idx = bp->total_irqs;
9126 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9127 max_idx = min_t(int, bp->total_irqs, max_cp);
fbcfc8e4 9128 avail_msix = max_idx - bp->cp_nr_rings;
f1ca94de 9129 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
fbcfc8e4
MC
9130 return avail_msix;
9131
9132 if (max_irq < total_req) {
9133 num = max_irq - bp->cp_nr_rings;
9134 if (num <= 0)
9135 return 0;
9136 }
9137 return num;
9138}
9139
08654eb2
MC
9140static int bnxt_get_num_msix(struct bnxt *bp)
9141{
f1ca94de 9142 if (!BNXT_NEW_RM(bp))
08654eb2
MC
9143 return bnxt_get_max_func_irqs(bp);
9144
c0b8cda0 9145 return bnxt_nq_rings_in_use(bp);
08654eb2
MC
9146}
9147
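/* Enable MSI-X: request up to the supported number of vectors from the
 * PCI core, then trim the RX/TX ring counts to fit the vectors actually
 * granted, keeping any vectors reserved for the ULP driver.
 */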
7809592d 9148static int bnxt_init_msix(struct bnxt *bp)
c0c050c5 9149{
fbcfc8e4 9150 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
7809592d 9151 struct msix_entry *msix_ent;
c0c050c5 9152
08654eb2
MC
9153 total_vecs = bnxt_get_num_msix(bp);
9154 max = bnxt_get_max_func_irqs(bp);
9155 if (total_vecs > max)
9156 total_vecs = max;
9157
2773dfb2
MC
9158 if (!total_vecs)
9159 return 0;
9160
c0c050c5
MC
9161 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
9162 if (!msix_ent)
9163 return -ENOMEM;
9164
9165 for (i = 0; i < total_vecs; i++) {
9166 msix_ent[i].entry = i;
9167 msix_ent[i].vector = 0;
9168 }
9169
01657bcd
MC
9170 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
9171 min = 2;
9172
9173 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
fbcfc8e4
MC
9174 ulp_msix = bnxt_get_ulp_msix_num(bp);
9175 if (total_vecs < 0 || total_vecs < ulp_msix) {
c0c050c5
MC
9176 rc = -ENODEV;
9177 goto msix_setup_exit;
9178 }
9179
9180 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
9181 if (bp->irq_tbl) {
7809592d
MC
9182 for (i = 0; i < total_vecs; i++)
9183 bp->irq_tbl[i].vector = msix_ent[i].vector;
c0c050c5 9184
7809592d 9185 bp->total_irqs = total_vecs;
c0c050c5 9186 /* Trim rings based upon num of vectors allocated */
6e6c5a57 9187 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
fbcfc8e4 9188 total_vecs - ulp_msix, min == 1);
6e6c5a57
MC
9189 if (rc)
9190 goto msix_setup_exit;
9191
7809592d
MC
9192 bp->cp_nr_rings = (min == 1) ?
9193 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
9194 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5 9195
c0c050c5
MC
9196 } else {
9197 rc = -ENOMEM;
9198 goto msix_setup_exit;
9199 }
9200 bp->flags |= BNXT_FLAG_USING_MSIX;
9201 kfree(msix_ent);
9202 return 0;
9203
9204msix_setup_exit:
7809592d
MC
9205 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
9206 kfree(bp->irq_tbl);
9207 bp->irq_tbl = NULL;
c0c050c5
MC
9208 pci_disable_msix(bp->pdev);
9209 kfree(msix_ent);
9210 return rc;
9211}
9212
7809592d 9213static int bnxt_init_inta(struct bnxt *bp)
c0c050c5 9214{
33dbcf60 9215 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
7809592d
MC
9216 if (!bp->irq_tbl)
9217 return -ENOMEM;
9218
9219 bp->total_irqs = 1;
c0c050c5
MC
9220 bp->rx_nr_rings = 1;
9221 bp->tx_nr_rings = 1;
9222 bp->cp_nr_rings = 1;
01657bcd 9223 bp->flags |= BNXT_FLAG_SHARED_RINGS;
c0c050c5 9224 bp->irq_tbl[0].vector = bp->pdev->irq;
7809592d 9225 return 0;
c0c050c5
MC
9226}
9227
7809592d 9228static int bnxt_init_int_mode(struct bnxt *bp)
c0c050c5 9229{
20d7d1c5 9230 int rc = -ENODEV;
c0c050c5
MC
9231
9232 if (bp->flags & BNXT_FLAG_MSIX_CAP)
7809592d 9233 rc = bnxt_init_msix(bp);
c0c050c5 9234
1fa72e29 9235 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
c0c050c5 9236 /* fallback to INTA */
7809592d 9237 rc = bnxt_init_inta(bp);
c0c050c5
MC
9238 }
9239 return rc;
9240}
9241
7809592d
MC
9242static void bnxt_clear_int_mode(struct bnxt *bp)
9243{
9244 if (bp->flags & BNXT_FLAG_USING_MSIX)
9245 pci_disable_msix(bp->pdev);
9246
9247 kfree(bp->irq_tbl);
9248 bp->irq_tbl = NULL;
9249 bp->flags &= ~BNXT_FLAG_USING_MSIX;
9250}
9251
1b3f0b75 9252int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
674f50a5 9253{
674f50a5 9254 int tcs = netdev_get_num_tc(bp->dev);
1b3f0b75 9255 bool irq_cleared = false;
674f50a5
MC
9256 int rc;
9257
9258 if (!bnxt_need_reserve_rings(bp))
9259 return 0;
9260
1b3f0b75
MC
9261 if (irq_re_init && BNXT_NEW_RM(bp) &&
9262 bnxt_get_num_msix(bp) != bp->total_irqs) {
ec86f14e 9263 bnxt_ulp_irq_stop(bp);
674f50a5 9264 bnxt_clear_int_mode(bp);
1b3f0b75 9265 irq_cleared = true;
36d65be9
MC
9266 }
9267 rc = __bnxt_reserve_rings(bp);
1b3f0b75 9268 if (irq_cleared) {
36d65be9
MC
9269 if (!rc)
9270 rc = bnxt_init_int_mode(bp);
ec86f14e 9271 bnxt_ulp_irq_restart(bp, rc);
36d65be9
MC
9272 }
9273 if (rc) {
9274 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
9275 return rc;
674f50a5
MC
9276 }
9277 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
9278 netdev_err(bp->dev, "tx ring reservation failure\n");
9279 netdev_reset_tc(bp->dev);
9280 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
9281 return -ENOMEM;
9282 }
674f50a5
MC
9283 return 0;
9284}
9285
c0c050c5
MC
9286static void bnxt_free_irq(struct bnxt *bp)
9287{
9288 struct bnxt_irq *irq;
9289 int i;
9290
9291#ifdef CONFIG_RFS_ACCEL
9292 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
9293 bp->dev->rx_cpu_rmap = NULL;
9294#endif
cb98526b 9295 if (!bp->irq_tbl || !bp->bnapi)
c0c050c5
MC
9296 return;
9297
9298 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
9299 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9300
9301 irq = &bp->irq_tbl[map_idx];
56f0fd80
VV
9302 if (irq->requested) {
9303 if (irq->have_cpumask) {
9304 irq_set_affinity_hint(irq->vector, NULL);
9305 free_cpumask_var(irq->cpu_mask);
9306 irq->have_cpumask = 0;
9307 }
c0c050c5 9308 free_irq(irq->vector, bp->bnapi[i]);
56f0fd80
VV
9309 }
9310
c0c050c5
MC
9311 irq->requested = 0;
9312 }
c0c050c5
MC
9313}
9314
9315static int bnxt_request_irq(struct bnxt *bp)
9316{
b81a90d3 9317 int i, j, rc = 0;
c0c050c5
MC
9318 unsigned long flags = 0;
9319#ifdef CONFIG_RFS_ACCEL
e5811b8c 9320 struct cpu_rmap *rmap;
c0c050c5
MC
9321#endif
9322
e5811b8c
MC
9323 rc = bnxt_setup_int_mode(bp);
9324 if (rc) {
9325 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9326 rc);
9327 return rc;
9328 }
9329#ifdef CONFIG_RFS_ACCEL
9330 rmap = bp->dev->rx_cpu_rmap;
9331#endif
c0c050c5
MC
9332 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9333 flags = IRQF_SHARED;
9334
b81a90d3 9335 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
9336 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9337 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9338
c0c050c5 9339#ifdef CONFIG_RFS_ACCEL
b81a90d3 9340 if (rmap && bp->bnapi[i]->rx_ring) {
c0c050c5
MC
9341 rc = irq_cpu_rmap_add(rmap, irq->vector);
9342 if (rc)
9343 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
b81a90d3
MC
9344 j);
9345 j++;
c0c050c5
MC
9346 }
9347#endif
9348 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9349 bp->bnapi[i]);
9350 if (rc)
9351 break;
9352
9353 irq->requested = 1;
56f0fd80
VV
9354
9355 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9356 int numa_node = dev_to_node(&bp->pdev->dev);
9357
9358 irq->have_cpumask = 1;
9359 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9360 irq->cpu_mask);
9361 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9362 if (rc) {
9363 netdev_warn(bp->dev,
9364 "Set affinity failed, IRQ = %d\n",
9365 irq->vector);
9366 break;
9367 }
9368 }
c0c050c5
MC
9369 }
9370 return rc;
9371}
9372
9373static void bnxt_del_napi(struct bnxt *bp)
9374{
9375 int i;
9376
9377 if (!bp->bnapi)
9378 return;
9379
9380 for (i = 0; i < bp->cp_nr_rings; i++) {
9381 struct bnxt_napi *bnapi = bp->bnapi[i];
9382
5198d545 9383 __netif_napi_del(&bnapi->napi);
c0c050c5 9384 }
5198d545 9385 /* We called __netif_napi_del(), we need
e5f6f564
ED
9386 * to respect an RCU grace period before freeing napi structures.
9387 */
9388 synchronize_net();
c0c050c5
MC
9389}
9390
9391static void bnxt_init_napi(struct bnxt *bp)
9392{
9393 int i;
10bbdaf5 9394 unsigned int cp_nr_rings = bp->cp_nr_rings;
c0c050c5
MC
9395 struct bnxt_napi *bnapi;
9396
9397 if (bp->flags & BNXT_FLAG_USING_MSIX) {
0fcec985
MC
9398 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9399
9400 if (bp->flags & BNXT_FLAG_CHIP_P5)
9401 poll_fn = bnxt_poll_p5;
9402 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10bbdaf5
PS
9403 cp_nr_rings--;
9404 for (i = 0; i < cp_nr_rings; i++) {
c0c050c5 9405 bnapi = bp->bnapi[i];
b48b89f9 9406 netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
c0c050c5 9407 }
10bbdaf5
PS
9408 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9409 bnapi = bp->bnapi[cp_nr_rings];
9410 netif_napi_add(bp->dev, &bnapi->napi,
b48b89f9 9411 bnxt_poll_nitroa0);
10bbdaf5 9412 }
c0c050c5
MC
9413 } else {
9414 bnapi = bp->bnapi[0];
b48b89f9 9415 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
c0c050c5
MC
9416 }
9417}
9418
9419static void bnxt_disable_napi(struct bnxt *bp)
9420{
9421 int i;
9422
e340a5c4
MC
9423 if (!bp->bnapi ||
9424 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
c0c050c5
MC
9425 return;
9426
0bc0b97f
AG
9427 for (i = 0; i < bp->cp_nr_rings; i++) {
9428 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9429
01cca6b9 9430 napi_disable(&bp->bnapi[i]->napi);
0bc0b97f
AG
9431 if (bp->bnapi[i]->rx_ring)
9432 cancel_work_sync(&cpr->dim.work);
0bc0b97f 9433 }
c0c050c5
MC
9434}
9435
9436static void bnxt_enable_napi(struct bnxt *bp)
9437{
9438 int i;
9439
e340a5c4 9440 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
c0c050c5 9441 for (i = 0; i < bp->cp_nr_rings; i++) {
8a27d4b9
MC
9442 struct bnxt_napi *bnapi = bp->bnapi[i];
9443 struct bnxt_cp_ring_info *cpr;
9444
9445 cpr = &bnapi->cp_ring;
9446 if (bnapi->in_reset)
9447 cpr->sw_stats.rx.rx_resets++;
9448 bnapi->in_reset = false;
6a8788f2 9449
8a27d4b9 9450 if (bnapi->rx_ring) {
6a8788f2 9451 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
c002bd52 9452 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6a8788f2 9453 }
8a27d4b9 9454 napi_enable(&bnapi->napi);
c0c050c5
MC
9455 }
9456}
9457
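/* Quiesce transmit: mark every TX ring as closing, wait for in-flight
 * NAPI polls to see the new state, then drop the carrier and stop all
 * netdev TX queues.
 */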
7df4ae9f 9458void bnxt_tx_disable(struct bnxt *bp)
c0c050c5
MC
9459{
9460 int i;
c0c050c5 9461 struct bnxt_tx_ring_info *txr;
c0c050c5 9462
b6ab4b01 9463 if (bp->tx_ring) {
c0c050c5 9464 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 9465 txr = &bp->tx_ring[i];
3c603136 9466 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
c0c050c5
MC
9467 }
9468 }
3c603136
JK
9469 /* Make sure napi polls see @dev_state change */
9470 synchronize_net();
132e0b65
EP
9471 /* Drop carrier first to prevent TX timeout */
9472 netif_carrier_off(bp->dev);
c0c050c5
MC
9473 /* Stop all TX queues */
9474 netif_tx_disable(bp->dev);
c0c050c5
MC
9475}
9476
7df4ae9f 9477void bnxt_tx_enable(struct bnxt *bp)
c0c050c5
MC
9478{
9479 int i;
c0c050c5 9480 struct bnxt_tx_ring_info *txr;
c0c050c5
MC
9481
9482 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 9483 txr = &bp->tx_ring[i];
3c603136 9484 WRITE_ONCE(txr->dev_state, 0);
c0c050c5 9485 }
3c603136
JK
9486 /* Make sure napi polls see @dev_state change */
9487 synchronize_net();
c0c050c5 9488 netif_tx_wake_all_queues(bp->dev);
0f5a4841 9489 if (BNXT_LINK_IS_UP(bp))
c0c050c5
MC
9490 netif_carrier_on(bp->dev);
9491}
9492
2046e3c3
MC
9493static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9494{
9495 u8 active_fec = link_info->active_fec_sig_mode &
9496 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9497
9498 switch (active_fec) {
9499 default:
9500 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9501 return "None";
9502 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9503 return "Clause 74 BaseR";
9504 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9505 return "Clause 91 RS(528,514)";
9506 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9507 return "Clause 91 RS544_1XN";
9508 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9509 return "Clause 91 RS(544,514)";
9510 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9511 return "Clause 91 RS272_1XN";
9512 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9513 return "Clause 91 RS(272,257)";
9514 }
9515}
9516
228ea8c1 9517void bnxt_report_link(struct bnxt *bp)
c0c050c5 9518{
0f5a4841 9519 if (BNXT_LINK_IS_UP(bp)) {
1d2deb61 9520 const char *signal = "";
c0c050c5 9521 const char *flow_ctrl;
1d2deb61 9522 const char *duplex;
38a21b34
DK
9523 u32 speed;
9524 u16 fec;
c0c050c5
MC
9525
9526 netif_carrier_on(bp->dev);
8eddb3e7
MC
9527 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9528 if (speed == SPEED_UNKNOWN) {
9529 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9530 return;
9531 }
c0c050c5
MC
9532 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9533 duplex = "full";
9534 else
9535 duplex = "half";
9536 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9537 flow_ctrl = "ON - receive & transmit";
9538 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9539 flow_ctrl = "ON - transmit";
9540 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9541 flow_ctrl = "ON - receive";
9542 else
9543 flow_ctrl = "none";
1d2deb61
EP
9544 if (bp->link_info.phy_qcfg_resp.option_flags &
9545 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9546 u8 sig_mode = bp->link_info.active_fec_sig_mode &
9547 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9548 switch (sig_mode) {
9549 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9550 signal = "(NRZ) ";
9551 break;
9552 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9553 signal = "(PAM4) ";
9554 break;
9555 default:
9556 break;
9557 }
9558 }
9559 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9560 speed, signal, duplex, flow_ctrl);
b0d28207 9561 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
170ce013
MC
9562 netdev_info(bp->dev, "EEE is %s\n",
9563 bp->eee.eee_active ? "active" :
9564 "not active");
e70c752f
MC
9565 fec = bp->link_info.fec_cfg;
9566 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
2046e3c3 9567 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
e70c752f 9568 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
2046e3c3 9569 bnxt_report_fec(&bp->link_info));
c0c050c5
MC
9570 } else {
9571 netif_carrier_off(bp->dev);
9572 netdev_err(bp->dev, "NIC Link is Down\n");
9573 }
9574}
9575
3128e811
MC
9576static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9577{
9578 if (!resp->supported_speeds_auto_mode &&
9579 !resp->supported_speeds_force_mode &&
9580 !resp->supported_pam4_speeds_auto_mode &&
9581 !resp->supported_pam4_speeds_force_mode)
9582 return true;
9583 return false;
9584}
9585
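/* Query the PHY capabilities: EEE support and LPI timers, supported
 * NRZ and PAM4 autoneg speeds, and port count.  Also handles the
 * "no speeds supported" case used to mark the PHY as disabled.
 */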
170ce013
MC
9586static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9587{
93ed8117 9588 struct bnxt_link_info *link_info = &bp->link_info;
bbf33d1d
EP
9589 struct hwrm_port_phy_qcaps_output *resp;
9590 struct hwrm_port_phy_qcaps_input *req;
9591 int rc = 0;
170ce013
MC
9592
9593 if (bp->hwrm_spec_code < 0x10201)
9594 return 0;
9595
bbf33d1d
EP
9596 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
9597 if (rc)
9598 return rc;
170ce013 9599
bbf33d1d
EP
9600 resp = hwrm_req_hold(bp, req);
9601 rc = hwrm_req_send(bp, req);
170ce013
MC
9602 if (rc)
9603 goto hwrm_phy_qcaps_exit;
9604
9a3bc77e 9605 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
acb20054 9606 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
170ce013
MC
9607 struct ethtool_eee *eee = &bp->eee;
9608 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9609
170ce013
MC
9610 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9611 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9612 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9613 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9614 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9615 }
fea6b333 9616
3128e811
MC
9617 if (bp->hwrm_spec_code >= 0x10a01) {
9618 if (bnxt_phy_qcaps_no_speed(resp)) {
9619 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9620 netdev_warn(bp->dev, "Ethernet link disabled\n");
9621 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9622 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9623 netdev_info(bp->dev, "Ethernet link enabled\n");
9624 /* Phy re-enabled, reprobe the speeds */
9625 link_info->support_auto_speeds = 0;
9626 link_info->support_pam4_auto_speeds = 0;
9627 }
9628 }
520ad89a
MC
9629 if (resp->supported_speeds_auto_mode)
9630 link_info->support_auto_speeds =
9631 le16_to_cpu(resp->supported_speeds_auto_mode);
d058426e
EP
9632 if (resp->supported_pam4_speeds_auto_mode)
9633 link_info->support_pam4_auto_speeds =
9634 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
170ce013 9635
d5430d31
MC
9636 bp->port_count = resp->port_cnt;
9637
170ce013 9638hwrm_phy_qcaps_exit:
bbf33d1d 9639 hwrm_req_drop(bp, req);
170ce013
MC
9640 return rc;
9641}
9642
c916062a
EP
9643static bool bnxt_support_dropped(u16 advertising, u16 supported)
9644{
9645 u16 diff = advertising ^ supported;
9646
9647 return ((supported | diff) != supported);
9648}
9649
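/* Refresh the cached link state from PORT_PHY_QCFG: speed, duplex,
 * pause, FEC, EEE and module status.  With @chng_link_state set, the
 * link state is updated and reported on any change.  Finally, if any
 * advertised speeds are no longer supported, autoneg is re-applied.
 */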
ccd6a9dc 9650int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
c0c050c5 9651{
c0c050c5 9652 struct bnxt_link_info *link_info = &bp->link_info;
bbf33d1d
EP
9653 struct hwrm_port_phy_qcfg_output *resp;
9654 struct hwrm_port_phy_qcfg_input *req;
0f5a4841 9655 u8 link_state = link_info->link_state;
d058426e 9656 bool support_changed = false;
bbf33d1d 9657 int rc;
c0c050c5 9658
bbf33d1d
EP
9659 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
9660 if (rc)
9661 return rc;
c0c050c5 9662
bbf33d1d
EP
9663 resp = hwrm_req_hold(bp, req);
9664 rc = hwrm_req_send(bp, req);
c0c050c5 9665 if (rc) {
bbf33d1d 9666 hwrm_req_drop(bp, req);
662c9b22
EP
9667 if (BNXT_VF(bp) && rc == -ENODEV) {
9668 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
9669 rc = 0;
9670 }
c0c050c5
MC
9671 return rc;
9672 }
9673
9674 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9675 link_info->phy_link_status = resp->link;
acb20054
MC
9676 link_info->duplex = resp->duplex_cfg;
9677 if (bp->hwrm_spec_code >= 0x10800)
9678 link_info->duplex = resp->duplex_state;
c0c050c5
MC
9679 link_info->pause = resp->pause;
9680 link_info->auto_mode = resp->auto_mode;
9681 link_info->auto_pause_setting = resp->auto_pause;
3277360e 9682 link_info->lp_pause = resp->link_partner_adv_pause;
c0c050c5 9683 link_info->force_pause_setting = resp->force_pause;
acb20054 9684 link_info->duplex_setting = resp->duplex_cfg;
c0c050c5
MC
9685 if (link_info->phy_link_status == BNXT_LINK_LINK)
9686 link_info->link_speed = le16_to_cpu(resp->link_speed);
9687 else
9688 link_info->link_speed = 0;
9689 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
d058426e
EP
9690 link_info->force_pam4_link_speed =
9691 le16_to_cpu(resp->force_pam4_link_speed);
c0c050c5 9692 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
d058426e 9693 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
c0c050c5 9694 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
d058426e
EP
9695 link_info->auto_pam4_link_speeds =
9696 le16_to_cpu(resp->auto_pam4_link_speed_mask);
3277360e
MC
9697 link_info->lp_auto_link_speeds =
9698 le16_to_cpu(resp->link_partner_adv_speeds);
d058426e
EP
9699 link_info->lp_auto_pam4_link_speeds =
9700 resp->link_partner_pam4_adv_speeds;
c0c050c5
MC
9701 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9702 link_info->phy_ver[0] = resp->phy_maj;
9703 link_info->phy_ver[1] = resp->phy_min;
9704 link_info->phy_ver[2] = resp->phy_bld;
9705 link_info->media_type = resp->media_type;
03efbec0 9706 link_info->phy_type = resp->phy_type;
11f15ed3 9707 link_info->transceiver = resp->xcvr_pkg_type;
170ce013
MC
9708 link_info->phy_addr = resp->eee_config_phy_addr &
9709 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
42ee18fe 9710 link_info->module_status = resp->module_status;
170ce013 9711
b0d28207 9712 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
170ce013
MC
9713 struct ethtool_eee *eee = &bp->eee;
9714 u16 fw_speeds;
9715
9716 eee->eee_active = 0;
9717 if (resp->eee_config_phy_addr &
9718 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9719 eee->eee_active = 1;
9720 fw_speeds = le16_to_cpu(
9721 resp->link_partner_adv_eee_link_speed_mask);
9722 eee->lp_advertised =
9723 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9724 }
9725
9726 /* Pull initial EEE config */
9727 if (!chng_link_state) {
9728 if (resp->eee_config_phy_addr &
9729 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9730 eee->eee_enabled = 1;
c0c050c5 9731
170ce013
MC
9732 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9733 eee->advertised =
9734 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9735
9736 if (resp->eee_config_phy_addr &
9737 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9738 __le32 tmr;
9739
9740 eee->tx_lpi_enabled = 1;
9741 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9742 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9743 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9744 }
9745 }
9746 }
e70c752f
MC
9747
9748 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
8b277589 9749 if (bp->hwrm_spec_code >= 0x10504) {
e70c752f 9750 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
8b277589
MC
9751 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9752 }
c0c050c5
MC
9753 /* TODO: need to add more logic to report VF link */
9754 if (chng_link_state) {
9755 if (link_info->phy_link_status == BNXT_LINK_LINK)
0f5a4841 9756 link_info->link_state = BNXT_LINK_STATE_UP;
c0c050c5 9757 else
0f5a4841
EP
9758 link_info->link_state = BNXT_LINK_STATE_DOWN;
9759 if (link_state != link_info->link_state)
c0c050c5
MC
9760 bnxt_report_link(bp);
9761 } else {
0f5a4841
EP
9762 /* always report link down if not required to update link state */
9763 link_info->link_state = BNXT_LINK_STATE_DOWN;
c0c050c5 9764 }
bbf33d1d 9765 hwrm_req_drop(bp, req);
286ef9d6 9766
c7e457f4 9767 if (!BNXT_PHY_CFG_ABLE(bp))
dac04907
MC
9768 return 0;
9769
c916062a
EP
9770 /* Check if any advertised speeds are no longer supported. The caller
9771 * holds the link_lock mutex, so we can modify link_info settings.
9772 */
9773 if (bnxt_support_dropped(link_info->advertising,
9774 link_info->support_auto_speeds)) {
286ef9d6 9775 link_info->advertising = link_info->support_auto_speeds;
d058426e 9776 support_changed = true;
286ef9d6 9777 }
d058426e
EP
9778 if (bnxt_support_dropped(link_info->advertising_pam4,
9779 link_info->support_pam4_auto_speeds)) {
9780 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9781 support_changed = true;
9782 }
9783 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9784 bnxt_hwrm_set_link_setting(bp, true, false);
c0c050c5
MC
9785 return 0;
9786}
9787
10289bec
MC
9788static void bnxt_get_port_module_status(struct bnxt *bp)
9789{
9790 struct bnxt_link_info *link_info = &bp->link_info;
9791 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9792 u8 module_status;
9793
9794 if (bnxt_update_link(bp, true))
9795 return;
9796
9797 module_status = link_info->module_status;
9798 switch (module_status) {
9799 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9800 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9801 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9802 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9803 bp->pf.port_id);
9804 if (bp->hwrm_spec_code >= 0x10201) {
9805 netdev_warn(bp->dev, "Module part number %s\n",
9806 resp->phy_vendor_partnumber);
9807 }
9808 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9809 netdev_warn(bp->dev, "TX is disabled\n");
9810 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9811 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9812 }
9813}
9814
c0c050c5
MC
9815static void
9816bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9817{
9818 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
c9ee9516
MC
9819 if (bp->hwrm_spec_code >= 0x10201)
9820 req->auto_pause =
9821 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
c0c050c5
MC
9822 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9823 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9824 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
49b5c7a1 9825 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
c0c050c5
MC
9826 req->enables |=
9827 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9828 } else {
9829 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9830 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9831 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9832 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9833 req->enables |=
9834 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
c9ee9516
MC
9835 if (bp->hwrm_spec_code >= 0x10201) {
9836 req->auto_pause = req->force_pause;
9837 req->enables |= cpu_to_le32(
9838 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9839 }
c0c050c5
MC
9840 }
9841}
9842
d058426e 9843static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
c0c050c5 9844{
d058426e
EP
9845 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9846 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9847 if (bp->link_info.advertising) {
9848 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9849 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9850 }
9851 if (bp->link_info.advertising_pam4) {
9852 req->enables |=
9853 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9854 req->auto_link_pam4_speed_mask =
9855 cpu_to_le16(bp->link_info.advertising_pam4);
9856 }
c0c050c5 9857 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
d058426e 9858 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
c0c050c5 9859 } else {
c0c050c5 9860 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
d058426e
EP
9861 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9862 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9863 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9864 } else {
9865 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9866 }
c0c050c5
MC
9867 }
9868
c0c050c5
MC
9869 /* tell chimp that the setting takes effect immediately */
9870 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9871}
9872
9873int bnxt_hwrm_set_pause(struct bnxt *bp)
9874{
bbf33d1d 9875 struct hwrm_port_phy_cfg_input *req;
c0c050c5
MC
9876 int rc;
9877
bbf33d1d
EP
9878 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9879 if (rc)
9880 return rc;
9881
9882 bnxt_hwrm_set_pause_common(bp, req);
c0c050c5
MC
9883
9884 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9885 bp->link_info.force_link_chng)
bbf33d1d 9886 bnxt_hwrm_set_link_common(bp, req);
c0c050c5 9887
bbf33d1d 9888 rc = hwrm_req_send(bp, req);
c0c050c5
MC
9889 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9890 /* since changing of pause setting doesn't trigger any link
9891 * change event, the driver needs to update the current pause
9892 * result upon successfully return of the phy_cfg command
9893 */
9894 bp->link_info.pause =
9895 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9896 bp->link_info.auto_pause_setting = 0;
9897 if (!bp->link_info.force_link_chng)
9898 bnxt_report_link(bp);
9899 }
9900 bp->link_info.force_link_chng = false;
c0c050c5
MC
9901 return rc;
9902}
9903
939f7f0c
MC
9904static void bnxt_hwrm_set_eee(struct bnxt *bp,
9905 struct hwrm_port_phy_cfg_input *req)
9906{
9907 struct ethtool_eee *eee = &bp->eee;
9908
9909 if (eee->eee_enabled) {
9910 u16 eee_speeds;
9911 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9912
9913 if (eee->tx_lpi_enabled)
9914 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9915 else
9916 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9917
9918 req->flags |= cpu_to_le32(flags);
9919 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9920 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9921 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9922 } else {
9923 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9924 }
9925}
9926
9927int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
c0c050c5 9928{
bbf33d1d
EP
9929 struct hwrm_port_phy_cfg_input *req;
9930 int rc;
9931
9932 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9933 if (rc)
9934 return rc;
c0c050c5 9935
c0c050c5 9936 if (set_pause)
bbf33d1d 9937 bnxt_hwrm_set_pause_common(bp, req);
c0c050c5 9938
bbf33d1d 9939 bnxt_hwrm_set_link_common(bp, req);
939f7f0c
MC
9940
9941 if (set_eee)
bbf33d1d
EP
9942 bnxt_hwrm_set_eee(bp, req);
9943 return hwrm_req_send(bp, req);
c0c050c5
MC
9944}
9945
33f7d55f
MC
9946static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9947{
bbf33d1d
EP
9948 struct hwrm_port_phy_cfg_input *req;
9949 int rc;
33f7d55f 9950
567b2abe 9951 if (!BNXT_SINGLE_PF(bp))
33f7d55f
MC
9952 return 0;
9953
d5ca9905
MC
9954 if (pci_num_vf(bp->pdev) &&
9955 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
33f7d55f
MC
9956 return 0;
9957
bbf33d1d
EP
9958 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9959 if (rc)
9960 return rc;
9961
9962 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
0f5a4841
EP
9963 rc = hwrm_req_send(bp, req);
9964 if (!rc) {
9965 mutex_lock(&bp->link_lock);
9966 /* Device is not obliged to bring the link down in certain scenarios, even
9967 * when forced. Setting the state unknown is consistent with
9968 * driver startup and will force link state to be reported
9969 * during subsequent open based on PORT_PHY_QCFG.
9970 */
9971 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
9972 mutex_unlock(&bp->link_lock);
9973 }
9974 return rc;
33f7d55f
MC
9975}
9976
b187e4ba
EP
9977static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9978{
9979#ifdef CONFIG_TEE_BNXT_FW
9980 int rc = tee_bnxt_fw_load();
9981
9982 if (rc)
9983 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9984
9985 return rc;
9986#else
9987 netdev_err(bp->dev, "OP-TEE not supported\n");
9988 return -ENODEV;
9989#endif
9990}
9991
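/* Poll the firmware health register while the firmware is booting or
 * recovering, up to BNXT_FW_RETRY times.  If the firmware crashed and
 * no master function can reset it, attempt a reset through OP-TEE.
 */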
9992static int bnxt_try_recover_fw(struct bnxt *bp)
9993{
9994 if (bp->fw_health && bp->fw_health->status_reliable) {
d1cbd165
MC
9995 int retry = 0, rc;
9996 u32 sts;
9997
d1cbd165 9998 do {
d1cbd165 9999 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
7b370ad7 10000 rc = bnxt_hwrm_poll(bp);
17e1be34
MC
10001 if (!BNXT_FW_IS_BOOTING(sts) &&
10002 !BNXT_FW_IS_RECOVERING(sts))
d1cbd165
MC
10003 break;
10004 retry++;
10005 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
b187e4ba 10006
d1cbd165
MC
10007 if (!BNXT_FW_IS_HEALTHY(sts)) {
10008 netdev_err(bp->dev,
10009 "Firmware not responding, status: 0x%x\n",
10010 sts);
10011 rc = -ENODEV;
10012 }
b187e4ba
EP
10013 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
10014 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
10015 return bnxt_fw_reset_via_optee(bp);
10016 }
d1cbd165 10017 return rc;
b187e4ba
EP
10018 }
10019
10020 return -ENODEV;
10021}
10022
b4c66425 10023static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
d900aadd
EP
10024{
10025 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
d900aadd
EP
10026
10027 if (!BNXT_NEW_RM(bp))
b4c66425 10028 return; /* no resource reservations required */
d900aadd
EP
10029
10030 hw_resc->resv_cp_rings = 0;
10031 hw_resc->resv_stat_ctxs = 0;
10032 hw_resc->resv_irqs = 0;
10033 hw_resc->resv_tx_rings = 0;
10034 hw_resc->resv_rx_rings = 0;
10035 hw_resc->resv_hw_ring_grps = 0;
10036 hw_resc->resv_vnics = 0;
10037 if (!fw_reset) {
10038 bp->tx_nr_rings = 0;
10039 bp->rx_nr_rings = 0;
10040 }
b4c66425
VG
10041}
10042
10043int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
10044{
10045 int rc;
10046
10047 if (!BNXT_NEW_RM(bp))
10048 return 0; /* no resource reservations required */
10049
10050 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
10051 if (rc)
10052 netdev_err(bp->dev, "resc_qcaps failed\n");
10053
10054 bnxt_clear_reservations(bp, fw_reset);
d900aadd
EP
10055
10056 return rc;
10057}
10058
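/* Tell the firmware the interface is going up or down with
 * FUNC_DRV_IF_CHANGE.  On the way up, the response indicates whether
 * resources changed or the firmware was reset while the interface was
 * down; in that case context memory, interrupt mode and ring
 * reservations are re-initialized before open continues.
 */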
25e1acd6
MC
10059static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
10060{
bbf33d1d
EP
10061 struct hwrm_func_drv_if_change_output *resp;
10062 struct hwrm_func_drv_if_change_input *req;
20d7d1c5
EP
10063 bool fw_reset = !bp->irq_tbl;
10064 bool resc_reinit = false;
5d06eb5c 10065 int rc, retry = 0;
ec5d31e3 10066 u32 flags = 0;
25e1acd6
MC
10067
10068 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
10069 return 0;
10070
bbf33d1d
EP
10071 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
10072 if (rc)
10073 return rc;
10074
25e1acd6 10075 if (up)
bbf33d1d
EP
10076 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
10077 resp = hwrm_req_hold(bp, req);
10078
10079 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
5d06eb5c 10080 while (retry < BNXT_FW_IF_RETRY) {
bbf33d1d 10081 rc = hwrm_req_send(bp, req);
5d06eb5c
VV
10082 if (rc != -EAGAIN)
10083 break;
10084
10085 msleep(50);
10086 retry++;
10087 }
5d06eb5c 10088
bbf33d1d
EP
10089 if (rc == -EAGAIN) {
10090 hwrm_req_drop(bp, req);
5d06eb5c 10091 return rc;
bbf33d1d
EP
10092 } else if (!rc) {
10093 flags = le32_to_cpu(resp->flags);
10094 } else if (up) {
b187e4ba
EP
10095 rc = bnxt_try_recover_fw(bp);
10096 fw_reset = true;
10097 }
bbf33d1d 10098 hwrm_req_drop(bp, req);
ec5d31e3
MC
10099 if (rc)
10100 return rc;
25e1acd6 10101
43a440c4
MC
10102 if (!up) {
10103 bnxt_inv_fw_health_reg(bp);
ec5d31e3 10104 return 0;
43a440c4 10105 }
25e1acd6 10106
ec5d31e3
MC
10107 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
10108 resc_reinit = true;
4279414b
MC
10109 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
10110 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
ec5d31e3 10111 fw_reset = true;
0e0e3c53
KA
10112 else
10113 bnxt_remap_fw_health_regs(bp);
ec5d31e3 10114
3bc7d4a3
MC
10115 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
10116 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
20d7d1c5 10117 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
3bc7d4a3
MC
10118 return -ENODEV;
10119 }
ec5d31e3
MC
10120 if (resc_reinit || fw_reset) {
10121 if (fw_reset) {
2924ad95 10122 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
f3a6d206
VV
10123 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10124 bnxt_ulp_stop(bp);
325f85f3
MC
10125 bnxt_free_ctx_mem(bp);
10126 kfree(bp->ctx);
10127 bp->ctx = NULL;
843d699d 10128 bnxt_dcb_free(bp);
ec5d31e3
MC
10129 rc = bnxt_fw_init_one(bp);
10130 if (rc) {
2924ad95 10131 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
ec5d31e3
MC
10132 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10133 return rc;
10134 }
10135 bnxt_clear_int_mode(bp);
10136 rc = bnxt_init_int_mode(bp);
10137 if (rc) {
2924ad95 10138 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
ec5d31e3
MC
10139 netdev_err(bp->dev, "init int mode failed\n");
10140 return rc;
10141 }
ec5d31e3 10142 }
d900aadd 10143 rc = bnxt_cancel_reservations(bp, fw_reset);
25e1acd6 10144 }
15a7deb8 10145 return rc;
25e1acd6
MC
10146}
10147
5ad2cbee
MC
10148static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
10149{
bbf33d1d
EP
10150 struct hwrm_port_led_qcaps_output *resp;
10151 struct hwrm_port_led_qcaps_input *req;
5ad2cbee
MC
10152 struct bnxt_pf_info *pf = &bp->pf;
10153 int rc;
10154
ba642ab7 10155 bp->num_leds = 0;
5ad2cbee
MC
10156 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
10157 return 0;
10158
bbf33d1d
EP
10159 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
10160 if (rc)
10161 return rc;
10162
10163 req->port_id = cpu_to_le16(pf->port_id);
10164 resp = hwrm_req_hold(bp, req);
10165 rc = hwrm_req_send(bp, req);
5ad2cbee 10166 if (rc) {
bbf33d1d 10167 hwrm_req_drop(bp, req);
5ad2cbee
MC
10168 return rc;
10169 }
10170 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
10171 int i;
10172
10173 bp->num_leds = resp->num_leds;
10174 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
10175 bp->num_leds);
10176 for (i = 0; i < bp->num_leds; i++) {
10177 struct bnxt_led_info *led = &bp->leds[i];
10178 __le16 caps = led->led_state_caps;
10179
10180 if (!led->led_group_id ||
10181 !BNXT_LED_ALT_BLINK_CAP(caps)) {
10182 bp->num_leds = 0;
10183 break;
10184 }
10185 }
10186 }
bbf33d1d 10187 hwrm_req_drop(bp, req);
5ad2cbee
MC
10188 return 0;
10189}
10190
5282db6c
MC
10191int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
10192{
bbf33d1d
EP
10193 struct hwrm_wol_filter_alloc_output *resp;
10194 struct hwrm_wol_filter_alloc_input *req;
5282db6c
MC
10195 int rc;
10196
bbf33d1d
EP
10197 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
10198 if (rc)
10199 return rc;
10200
10201 req->port_id = cpu_to_le16(bp->pf.port_id);
10202 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
10203 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
10204 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
10205
10206 resp = hwrm_req_hold(bp, req);
10207 rc = hwrm_req_send(bp, req);
5282db6c
MC
10208 if (!rc)
10209 bp->wol_filter_id = resp->wol_filter_id;
bbf33d1d 10210 hwrm_req_drop(bp, req);
5282db6c
MC
10211 return rc;
10212}
10213
10214int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
10215{
bbf33d1d
EP
10216 struct hwrm_wol_filter_free_input *req;
10217 int rc;
5282db6c 10218
bbf33d1d
EP
10219 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
10220 if (rc)
10221 return rc;
10222
10223 req->port_id = cpu_to_le16(bp->pf.port_id);
10224 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
10225 req->wol_filter_id = bp->wol_filter_id;
10226
10227 return hwrm_req_send(bp, req);
5282db6c
MC
10228}
10229
c1ef146a
MC
10230static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
10231{
bbf33d1d
EP
10232 struct hwrm_wol_filter_qcfg_output *resp;
10233 struct hwrm_wol_filter_qcfg_input *req;
c1ef146a
MC
10234 u16 next_handle = 0;
10235 int rc;
10236
bbf33d1d
EP
10237 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
10238 if (rc)
10239 return rc;
10240
10241 req->port_id = cpu_to_le16(bp->pf.port_id);
10242 req->handle = cpu_to_le16(handle);
10243 resp = hwrm_req_hold(bp, req);
10244 rc = hwrm_req_send(bp, req);
c1ef146a
MC
10245 if (!rc) {
10246 next_handle = le16_to_cpu(resp->next_handle);
10247 if (next_handle != 0) {
10248 if (resp->wol_type ==
10249 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
10250 bp->wol = 1;
10251 bp->wol_filter_id = resp->wol_filter_id;
10252 }
10253 }
10254 }
bbf33d1d 10255 hwrm_req_drop(bp, req);
c1ef146a
MC
10256 return next_handle;
10257}
10258
10259static void bnxt_get_wol_settings(struct bnxt *bp)
10260{
10261 u16 handle = 0;
10262
ba642ab7 10263 bp->wol = 0;
c1ef146a
MC
10264 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
10265 return;
10266
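	/* Walk the firmware's WoL filter list: each query returns the next
	 * handle, and the walk ends when the firmware reports 0 or 0xffff.
	 */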
10267 do {
10268 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
10269 } while (handle && handle != 0xffff);
10270}
10271
cde49a42
VV
10272#ifdef CONFIG_BNXT_HWMON
10273static ssize_t bnxt_show_temp(struct device *dev,
10274 struct device_attribute *devattr, char *buf)
10275{
cde49a42 10276 struct hwrm_temp_monitor_query_output *resp;
bbf33d1d 10277 struct hwrm_temp_monitor_query_input *req;
cde49a42 10278 struct bnxt *bp = dev_get_drvdata(dev);
12cce90b 10279 u32 len = 0;
d69753fa 10280 int rc;
cde49a42 10281
bbf33d1d
EP
10282 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10283 if (rc)
10284 return rc;
10285 resp = hwrm_req_hold(bp, req);
10286 rc = hwrm_req_send(bp, req);
d69753fa 10287 if (!rc)
12cce90b 10288 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
bbf33d1d 10289 hwrm_req_drop(bp, req);
27537929
DC
10290 if (rc)
10291 return rc;
10292 return len;
cde49a42
VV
10293}
10294static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
10295
10296static struct attribute *bnxt_attrs[] = {
10297 &sensor_dev_attr_temp1_input.dev_attr.attr,
10298 NULL
10299};
10300ATTRIBUTE_GROUPS(bnxt);
10301
10302static void bnxt_hwmon_close(struct bnxt *bp)
10303{
10304 if (bp->hwmon_dev) {
10305 hwmon_device_unregister(bp->hwmon_dev);
10306 bp->hwmon_dev = NULL;
10307 }
10308}
10309
10310static void bnxt_hwmon_open(struct bnxt *bp)
10311{
bbf33d1d 10312 struct hwrm_temp_monitor_query_input *req;
cde49a42 10313 struct pci_dev *pdev = bp->pdev;
d69753fa
EP
10314 int rc;
10315
bbf33d1d
EP
10316 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10317 if (!rc)
10318 rc = hwrm_req_send_silent(bp, req);
d69753fa
EP
10319 if (rc == -EACCES || rc == -EOPNOTSUPP) {
10320 bnxt_hwmon_close(bp);
10321 return;
10322 }
cde49a42 10323
ba642ab7
MC
10324 if (bp->hwmon_dev)
10325 return;
10326
cde49a42
VV
10327 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
10328 DRV_MODULE_NAME, bp,
10329 bnxt_groups);
10330 if (IS_ERR(bp->hwmon_dev)) {
10331 bp->hwmon_dev = NULL;
10332 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
10333 }
10334}
10335#else
10336static void bnxt_hwmon_close(struct bnxt *bp)
10337{
10338}
10339
10340static void bnxt_hwmon_open(struct bnxt *bp)
10341{
10342}
10343#endif
10344
939f7f0c
MC
10345static bool bnxt_eee_config_ok(struct bnxt *bp)
10346{
10347 struct ethtool_eee *eee = &bp->eee;
10348 struct bnxt_link_info *link_info = &bp->link_info;
10349
b0d28207 10350 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
939f7f0c
MC
10351 return true;
10352
10353 if (eee->eee_enabled) {
10354 u32 advertising =
10355 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
10356
10357 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10358 eee->eee_enabled = 0;
10359 return false;
10360 }
10361 if (eee->advertised & ~advertising) {
10362 eee->advertised = advertising & eee->supported;
10363 return false;
10364 }
10365 }
10366 return true;
10367}
10368
c0c050c5
MC
10369static int bnxt_update_phy_setting(struct bnxt *bp)
10370{
10371 int rc;
10372 bool update_link = false;
10373 bool update_pause = false;
939f7f0c 10374 bool update_eee = false;
c0c050c5
MC
10375 struct bnxt_link_info *link_info = &bp->link_info;
10376
10377 rc = bnxt_update_link(bp, true);
10378 if (rc) {
10379 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10380 rc);
10381 return rc;
10382 }
33dac24a
MC
10383 if (!BNXT_SINGLE_PF(bp))
10384 return 0;
10385
c0c050c5 10386 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
c9ee9516
MC
10387 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10388 link_info->req_flow_ctrl)
c0c050c5
MC
10389 update_pause = true;
10390 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10391 link_info->force_pause_setting != link_info->req_flow_ctrl)
10392 update_pause = true;
c0c050c5
MC
10393 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10394 if (BNXT_AUTO_MODE(link_info->auto_mode))
10395 update_link = true;
d058426e
EP
10396 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10397 link_info->req_link_speed != link_info->force_link_speed)
10398 update_link = true;
10399 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10400 link_info->req_link_speed != link_info->force_pam4_link_speed)
c0c050c5 10401 update_link = true;
de73018f
MC
10402 if (link_info->req_duplex != link_info->duplex_setting)
10403 update_link = true;
c0c050c5
MC
10404 } else {
10405 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10406 update_link = true;
d058426e
EP
10407 if (link_info->advertising != link_info->auto_link_speeds ||
10408 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
c0c050c5 10409 update_link = true;
c0c050c5
MC
10410 }
10411
16d663a6
MC
10412	/* The last close may have shut down the link, so we need to call

10413 * PHY_CFG to bring it back up.
10414 */
0f5a4841 10415 if (!BNXT_LINK_IS_UP(bp))
16d663a6
MC
10416 update_link = true;
10417
939f7f0c
MC
10418 if (!bnxt_eee_config_ok(bp))
10419 update_eee = true;
10420
c0c050c5 10421 if (update_link)
939f7f0c 10422 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
c0c050c5
MC
10423 else if (update_pause)
10424 rc = bnxt_hwrm_set_pause(bp);
10425 if (rc) {
10426 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10427 rc);
10428 return rc;
10429 }
10430
10431 return rc;
10432}
10433
11809490
JH
10434/* Common routine to pre-map certain register block to different GRC window.
10435 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
10436 * in PF and 3 windows in VF that can be customized to map in different
10437 * register blocks.
10438 */
10439static void bnxt_preset_reg_win(struct bnxt *bp)
10440{
10441 if (BNXT_PF(bp)) {
10442 /* CAG registers map to GRC window #4 */
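		/* Each window register appears to be 4 bytes wide, so
		 * window #4 lives at byte offset (4 - 1) * 4 = 12 from
		 * BNXT_GRCPF_REG_WINDOW_BASE_OUT, hence the "+ 12" below.
		 */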
10443 writel(BNXT_CAG_REG_BASE,
10444 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10445 }
10446}
10447
47558acd
MC
10448static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10449
6882c36c
EP
10450static int bnxt_reinit_after_abort(struct bnxt *bp)
10451{
10452 int rc;
10453
10454 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10455 return -EBUSY;
10456
d20cd745
VV
10457 if (bp->dev->reg_state == NETREG_UNREGISTERED)
10458 return -ENODEV;
10459
6882c36c
EP
10460 rc = bnxt_fw_init_one(bp);
10461 if (!rc) {
10462 bnxt_clear_int_mode(bp);
10463 rc = bnxt_init_int_mode(bp);
10464 if (!rc) {
10465 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10466 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10467 }
10468 }
10469 return rc;
10470}
10471
c0c050c5
MC
10472static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10473{
10474 int rc = 0;
10475
11809490 10476 bnxt_preset_reg_win(bp);
c0c050c5
MC
10477 netif_carrier_off(bp->dev);
10478 if (irq_re_init) {
47558acd
MC
10479 /* Reserve rings now if none were reserved at driver probe. */
10480 rc = bnxt_init_dflt_ring_mode(bp);
10481 if (rc) {
10482 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10483 return rc;
10484 }
c0c050c5 10485 }
1b3f0b75 10486 rc = bnxt_reserve_rings(bp, irq_re_init);
41e8d798
MC
10487 if (rc)
10488 return rc;
c0c050c5
MC
10489 if ((bp->flags & BNXT_FLAG_RFS) &&
10490 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10491 /* disable RFS if falling back to INTA */
10492 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10493 bp->flags &= ~BNXT_FLAG_RFS;
10494 }
10495
10496 rc = bnxt_alloc_mem(bp, irq_re_init);
10497 if (rc) {
10498 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10499 goto open_err_free_mem;
10500 }
10501
10502 if (irq_re_init) {
10503 bnxt_init_napi(bp);
10504 rc = bnxt_request_irq(bp);
10505 if (rc) {
10506 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
c58387ab 10507 goto open_err_irq;
c0c050c5
MC
10508 }
10509 }
10510
c0c050c5
MC
10511 rc = bnxt_init_nic(bp, irq_re_init);
10512 if (rc) {
10513 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
96ecdcc9 10514 goto open_err_irq;
c0c050c5
MC
10515 }
10516
96ecdcc9
JK
10517 bnxt_enable_napi(bp);
10518 bnxt_debug_dev_init(bp);
10519
c0c050c5 10520 if (link_re_init) {
e2dc9b6e 10521 mutex_lock(&bp->link_lock);
c0c050c5 10522 rc = bnxt_update_phy_setting(bp);
e2dc9b6e 10523 mutex_unlock(&bp->link_lock);
a1ef4a79 10524 if (rc) {
ba41d46f 10525 netdev_warn(bp->dev, "failed to update phy settings\n");
a1ef4a79
MC
10526 if (BNXT_SINGLE_PF(bp)) {
10527 bp->link_info.phy_retry = true;
10528 bp->link_info.phy_retry_expires =
10529 jiffies + 5 * HZ;
10530 }
10531 }
c0c050c5
MC
10532 }
10533
7cdd5fc3 10534 if (irq_re_init)
442a35a5 10535 udp_tunnel_nic_reset_ntf(bp->dev);
c0c050c5 10536
4f81def2
PC
10537 if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
10538 if (!static_key_enabled(&bnxt_xdp_locking_key))
10539 static_branch_enable(&bnxt_xdp_locking_key);
10540 } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
10541 static_branch_disable(&bnxt_xdp_locking_key);
10542 }
caefe526 10543 set_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
10544 bnxt_enable_int(bp);
10545 /* Enable TX queues */
10546 bnxt_tx_enable(bp);
10547 mod_timer(&bp->timer, jiffies + bp->current_interval);
10289bec 10548 /* Poll link status and check for SFP+ module status */
3c10ed49 10549 mutex_lock(&bp->link_lock);
10289bec 10550 bnxt_get_port_module_status(bp);
3c10ed49 10551 mutex_unlock(&bp->link_lock);
c0c050c5 10552
ee5c7fb3
SP
10553 /* VF-reps may need to be re-opened after the PF is re-opened */
10554 if (BNXT_PF(bp))
10555 bnxt_vf_reps_open(bp);
24ac1ecd 10556 bnxt_ptp_init_rtc(bp, true);
11862689 10557 bnxt_ptp_cfg_tstamp_filters(bp);
c0c050c5
MC
10558 return 0;
10559
c58387ab 10560open_err_irq:
c0c050c5
MC
10561 bnxt_del_napi(bp);
10562
10563open_err_free_mem:
10564 bnxt_free_skbs(bp);
10565 bnxt_free_irq(bp);
10566 bnxt_free_mem(bp, true);
10567 return rc;
10568}
10569
10570/* rtnl_lock held */
10571int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10572{
10573 int rc = 0;
10574
a1301f08
MC
10575 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10576 rc = -EIO;
10577 if (!rc)
10578 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
c0c050c5
MC
10579 if (rc) {
10580 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10581 dev_close(bp->dev);
10582 }
10583 return rc;
10584}
10585
f7dc1ea6
MC
10586/* rtnl_lock held, open the NIC halfway by allocating all resources, but
10587 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
10588 * self tests.
10589 */
10590int bnxt_half_open_nic(struct bnxt *bp)
10591{
10592 int rc = 0;
10593
11a39259
SK
10594 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10595 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10596 rc = -ENODEV;
10597 goto half_open_err;
10598 }
10599
6758f937 10600 rc = bnxt_alloc_mem(bp, true);
f7dc1ea6
MC
10601 if (rc) {
10602 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10603 goto half_open_err;
10604 }
cfcab3b3 10605 set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
6758f937 10606 rc = bnxt_init_nic(bp, true);
f7dc1ea6 10607 if (rc) {
cfcab3b3 10608 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
f7dc1ea6
MC
10609 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10610 goto half_open_err;
10611 }
10612 return 0;
10613
10614half_open_err:
10615 bnxt_free_skbs(bp);
6758f937 10616 bnxt_free_mem(bp, true);
f7dc1ea6
MC
10617 dev_close(bp->dev);
10618 return rc;
10619}
10620
10621/* rtnl_lock held, this call can only be made after a previous successful
10622 * call to bnxt_half_open_nic().
10623 */
10624void bnxt_half_close_nic(struct bnxt *bp)
10625{
6758f937 10626 bnxt_hwrm_resource_free(bp, false, true);
f7dc1ea6 10627 bnxt_free_skbs(bp);
6758f937 10628 bnxt_free_mem(bp, true);
cfcab3b3 10629 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
f7dc1ea6
MC
10630}
10631
228ea8c1 10632void bnxt_reenable_sriov(struct bnxt *bp)
c16d4ee0
MC
10633{
10634 if (BNXT_PF(bp)) {
10635 struct bnxt_pf_info *pf = &bp->pf;
10636 int n = pf->active_vfs;
10637
10638 if (n)
10639 bnxt_cfg_hw_sriov(bp, &n, true);
10640 }
10641}
10642
c0c050c5
MC
10643static int bnxt_open(struct net_device *dev)
10644{
10645 struct bnxt *bp = netdev_priv(dev);
25e1acd6 10646 int rc;
c0c050c5 10647
ec5d31e3 10648 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
6882c36c
EP
10649 rc = bnxt_reinit_after_abort(bp);
10650 if (rc) {
10651 if (rc == -EBUSY)
10652 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10653 else
10654 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10655 return -ENODEV;
10656 }
ec5d31e3
MC
10657 }
10658
10659 rc = bnxt_hwrm_if_change(bp, true);
25e1acd6 10660 if (rc)
ec5d31e3 10661 return rc;
d7859afb 10662
ec5d31e3
MC
10663 rc = __bnxt_open_nic(bp, true, true);
10664 if (rc) {
25e1acd6 10665 bnxt_hwrm_if_change(bp, false);
ec5d31e3 10666 } else {
f3a6d206 10667 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
12de2ead 10668 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
f3a6d206 10669 bnxt_ulp_start(bp, 0);
12de2ead
MC
10670 bnxt_reenable_sriov(bp);
10671 }
ec5d31e3
MC
10672 }
10673 bnxt_hwmon_open(bp);
10674 }
cde49a42 10675
25e1acd6 10676 return rc;
c0c050c5
MC
10677}
10678
f9b76ebd
MC
10679static bool bnxt_drv_busy(struct bnxt *bp)
10680{
10681 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10682 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10683}
10684
b8875ca3
MC
10685static void bnxt_get_ring_stats(struct bnxt *bp,
10686 struct rtnl_link_stats64 *stats);
10687
86e953db
MC
10688static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10689 bool link_re_init)
c0c050c5 10690{
ee5c7fb3
SP
10691 /* Close the VF-reps before closing PF */
10692 if (BNXT_PF(bp))
10693 bnxt_vf_reps_close(bp);
86e953db 10694
c0c050c5
MC
10695	/* Change device state to avoid TX queue wake-ups */
10696 bnxt_tx_disable(bp);
10697
caefe526 10698 clear_bit(BNXT_STATE_OPEN, &bp->state);
4cebdcec 10699 smp_mb__after_atomic();
f9b76ebd 10700 while (bnxt_drv_busy(bp))
4cebdcec 10701 msleep(20);
c0c050c5 10702
c909e7ca 10703 /* Flush rings and disable interrupts */
c0c050c5
MC
10704 bnxt_shutdown_nic(bp, irq_re_init);
10705
10706 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10707
cabfb09d 10708 bnxt_debug_dev_exit(bp);
c0c050c5 10709 bnxt_disable_napi(bp);
c0c050c5
MC
10710 del_timer_sync(&bp->timer);
10711 bnxt_free_skbs(bp);
10712
b8875ca3 10713 /* Save ring stats before shutdown */
b8056e84 10714 if (bp->bnapi && irq_re_init)
b8875ca3 10715 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
c0c050c5
MC
10716 if (irq_re_init) {
10717 bnxt_free_irq(bp);
10718 bnxt_del_napi(bp);
10719 }
10720 bnxt_free_mem(bp, irq_re_init);
86e953db
MC
10721}
10722
10723int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10724{
10725 int rc = 0;
10726
3bc7d4a3
MC
10727 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10728 /* If we get here, it means firmware reset is in progress
10729 * while we are trying to close. We can safely proceed with
10730 * the close because we are holding rtnl_lock(). Some firmware
10731 * messages may fail as we proceed to close. We set the
10732 * ABORT_ERR flag here so that the FW reset thread will later
10733 * abort when it gets the rtnl_lock() and sees the flag.
10734 */
10735 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10736 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10737 }
10738
86e953db
MC
10739#ifdef CONFIG_BNXT_SRIOV
10740 if (bp->sriov_cfg) {
10741 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10742 !bp->sriov_cfg,
10743 BNXT_SRIOV_CFG_WAIT_TMO);
10744 if (rc)
10745 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10746 }
10747#endif
10748 __bnxt_close_nic(bp, irq_re_init, link_re_init);
c0c050c5
MC
10749 return rc;
10750}
10751
10752static int bnxt_close(struct net_device *dev)
10753{
10754 struct bnxt *bp = netdev_priv(dev);
10755
cde49a42 10756 bnxt_hwmon_close(bp);
c0c050c5 10757 bnxt_close_nic(bp, true, true);
33f7d55f 10758 bnxt_hwrm_shutdown_link(bp);
25e1acd6 10759 bnxt_hwrm_if_change(bp, false);
c0c050c5
MC
10760 return 0;
10761}
10762
0ca12be9
VV
10763static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10764 u16 *val)
10765{
bbf33d1d
EP
10766 struct hwrm_port_phy_mdio_read_output *resp;
10767 struct hwrm_port_phy_mdio_read_input *req;
0ca12be9
VV
10768 int rc;
10769
10770 if (bp->hwrm_spec_code < 0x10a00)
10771 return -EOPNOTSUPP;
10772
bbf33d1d
EP
10773 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
10774 if (rc)
10775 return rc;
10776
10777 req->port_id = cpu_to_le16(bp->pf.port_id);
10778 req->phy_addr = phy_addr;
10779 req->reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 10780 if (mdio_phy_id_is_c45(phy_addr)) {
bbf33d1d
EP
10781 req->cl45_mdio = 1;
10782 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10783 req->dev_addr = mdio_phy_id_devad(phy_addr);
10784 req->reg_addr = cpu_to_le16(reg);
0ca12be9
VV
10785 }
10786
bbf33d1d
EP
10787 resp = hwrm_req_hold(bp, req);
10788 rc = hwrm_req_send(bp, req);
0ca12be9
VV
10789 if (!rc)
10790 *val = le16_to_cpu(resp->reg_data);
bbf33d1d 10791 hwrm_req_drop(bp, req);
0ca12be9
VV
10792 return rc;
10793}
10794
10795static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10796 u16 val)
10797{
bbf33d1d
EP
10798 struct hwrm_port_phy_mdio_write_input *req;
10799 int rc;
0ca12be9
VV
10800
10801 if (bp->hwrm_spec_code < 0x10a00)
10802 return -EOPNOTSUPP;
10803
bbf33d1d
EP
10804 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
10805 if (rc)
10806 return rc;
10807
10808 req->port_id = cpu_to_le16(bp->pf.port_id);
10809 req->phy_addr = phy_addr;
10810 req->reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 10811 if (mdio_phy_id_is_c45(phy_addr)) {
bbf33d1d
EP
10812 req->cl45_mdio = 1;
10813 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10814 req->dev_addr = mdio_phy_id_devad(phy_addr);
10815 req->reg_addr = cpu_to_le16(reg);
0ca12be9 10816 }
bbf33d1d 10817 req->reg_data = cpu_to_le16(val);
0ca12be9 10818
bbf33d1d 10819 return hwrm_req_send(bp, req);
0ca12be9
VV
10820}
10821
c0c050c5
MC
10822/* rtnl_lock held */
10823static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10824{
0ca12be9
VV
10825 struct mii_ioctl_data *mdio = if_mii(ifr);
10826 struct bnxt *bp = netdev_priv(dev);
10827 int rc;
10828
c0c050c5
MC
10829 switch (cmd) {
10830 case SIOCGMIIPHY:
0ca12be9
VV
10831 mdio->phy_id = bp->link_info.phy_addr;
10832
df561f66 10833 fallthrough;
c0c050c5 10834 case SIOCGMIIREG: {
0ca12be9
VV
10835 u16 mii_regval = 0;
10836
c0c050c5
MC
10837 if (!netif_running(dev))
10838 return -EAGAIN;
10839
0ca12be9
VV
10840 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10841 &mii_regval);
10842 mdio->val_out = mii_regval;
10843 return rc;
c0c050c5
MC
10844 }
10845
10846 case SIOCSMIIREG:
10847 if (!netif_running(dev))
10848 return -EAGAIN;
10849
0ca12be9
VV
10850 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10851 mdio->val_in);
c0c050c5 10852
118612d5
MC
10853 case SIOCSHWTSTAMP:
10854 return bnxt_hwtstamp_set(dev, ifr);
10855
10856 case SIOCGHWTSTAMP:
10857 return bnxt_hwtstamp_get(dev, ifr);
10858
c0c050c5
MC
10859 default:
10860 /* do nothing */
10861 break;
10862 }
10863 return -EOPNOTSUPP;
10864}
10865
b8875ca3
MC
10866static void bnxt_get_ring_stats(struct bnxt *bp,
10867 struct rtnl_link_stats64 *stats)
c0c050c5 10868{
b8875ca3 10869 int i;
c0c050c5 10870
c0c050c5
MC
10871 for (i = 0; i < bp->cp_nr_rings; i++) {
10872 struct bnxt_napi *bnapi = bp->bnapi[i];
10873 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
a0c30621 10874 u64 *sw = cpr->stats.sw_stats;
c0c050c5 10875
a0c30621
MC
10876 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10877 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10878 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
c0c050c5 10879
a0c30621
MC
10880 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10881 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10882 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
c0c050c5 10883
a0c30621
MC
10884 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10885 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10886 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
c0c050c5 10887
a0c30621
MC
10888 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10889 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10890 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
c0c050c5
MC
10891
10892 stats->rx_missed_errors +=
a0c30621 10893 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
c0c050c5 10894
a0c30621 10895 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
c0c050c5 10896
a0c30621 10897 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
40bedf7c 10898
907fd4a2
JK
10899 stats->rx_dropped +=
10900 cpr->sw_stats.rx.rx_netpoll_discards +
10901 cpr->sw_stats.rx.rx_oom_discards;
c0c050c5 10902 }
b8875ca3
MC
10903}
10904
10905static void bnxt_add_prev_stats(struct bnxt *bp,
10906 struct rtnl_link_stats64 *stats)
10907{
10908 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10909
10910 stats->rx_packets += prev_stats->rx_packets;
10911 stats->tx_packets += prev_stats->tx_packets;
10912 stats->rx_bytes += prev_stats->rx_bytes;
10913 stats->tx_bytes += prev_stats->tx_bytes;
10914 stats->rx_missed_errors += prev_stats->rx_missed_errors;
10915 stats->multicast += prev_stats->multicast;
40bedf7c 10916 stats->rx_dropped += prev_stats->rx_dropped;
b8875ca3
MC
10917 stats->tx_dropped += prev_stats->tx_dropped;
10918}
10919
10920static void
10921bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10922{
10923 struct bnxt *bp = netdev_priv(dev);
10924
10925 set_bit(BNXT_STATE_READ_STATS, &bp->state);
10926 /* Make sure bnxt_close_nic() sees that we are reading stats before
10927 * we check the BNXT_STATE_OPEN flag.
10928 */
10929 smp_mb__after_atomic();
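	/* This is intended to pair with the clear_bit(BNXT_STATE_OPEN) +
	 * smp_mb__after_atomic() + bnxt_drv_busy() wait in __bnxt_close_nic():
	 * either we observe BNXT_STATE_OPEN cleared below, or the closer
	 * observes BNXT_STATE_READ_STATS and waits for us to finish.
	 */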
10930 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10931 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10932 *stats = bp->net_stats_prev;
10933 return;
10934 }
10935
10936 bnxt_get_ring_stats(bp, stats);
10937 bnxt_add_prev_stats(bp, stats);
c0c050c5 10938
9947f83f 10939 if (bp->flags & BNXT_FLAG_PORT_STATS) {
a0c30621
MC
10940 u64 *rx = bp->port_stats.sw_stats;
10941 u64 *tx = bp->port_stats.sw_stats +
10942 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10943
10944 stats->rx_crc_errors =
10945 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10946 stats->rx_frame_errors =
10947 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10948 stats->rx_length_errors =
10949 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10950 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10951 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10952 stats->rx_errors =
10953 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10954 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10955 stats->collisions =
10956 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10957 stats->tx_fifo_errors =
10958 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10959 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
9947f83f 10960 }
f9b76ebd 10961 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
c0c050c5
MC
10962}
10963
10964static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10965{
10966 struct net_device *dev = bp->dev;
10967 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10968 struct netdev_hw_addr *ha;
10969 u8 *haddr;
10970 int mc_count = 0;
10971 bool update = false;
10972 int off = 0;
10973
10974 netdev_for_each_mc_addr(ha, dev) {
10975 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10976 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10977 vnic->mc_list_count = 0;
10978 return false;
10979 }
10980 haddr = ha->addr;
10981 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10982 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10983 update = true;
10984 }
10985 off += ETH_ALEN;
10986 mc_count++;
10987 }
10988 if (mc_count)
10989 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10990
10991 if (mc_count != vnic->mc_list_count) {
10992 vnic->mc_list_count = mc_count;
10993 update = true;
10994 }
10995 return update;
10996}
10997
10998static bool bnxt_uc_list_updated(struct bnxt *bp)
10999{
11000 struct net_device *dev = bp->dev;
11001 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11002 struct netdev_hw_addr *ha;
11003 int off = 0;
11004
11005 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
11006 return true;
11007
11008 netdev_for_each_uc_addr(ha, dev) {
11009 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
11010 return true;
11011
11012 off += ETH_ALEN;
11013 }
11014 return false;
11015}
11016
11017static void bnxt_set_rx_mode(struct net_device *dev)
11018{
11019 struct bnxt *bp = netdev_priv(dev);
268d0895 11020 struct bnxt_vnic_info *vnic;
c0c050c5
MC
11021 bool mc_update = false;
11022 bool uc_update;
268d0895 11023 u32 mask;
c0c050c5 11024
268d0895 11025 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
c0c050c5
MC
11026 return;
11027
268d0895
MC
11028 vnic = &bp->vnic_info[0];
11029 mask = vnic->rx_mask;
c0c050c5
MC
11030 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
11031 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
30e33848
MC
11032 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
11033 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
c0c050c5 11034
dd85fc0a 11035 if (dev->flags & IFF_PROMISC)
c0c050c5
MC
11036 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11037
11038 uc_update = bnxt_uc_list_updated(bp);
11039
30e33848
MC
11040 if (dev->flags & IFF_BROADCAST)
11041 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5
MC
11042 if (dev->flags & IFF_ALLMULTI) {
11043 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11044 vnic->mc_list_count = 0;
8cdb1592 11045 } else if (dev->flags & IFF_MULTICAST) {
c0c050c5
MC
11046 mc_update = bnxt_mc_list_updated(bp, &mask);
11047 }
11048
11049 if (mask != vnic->rx_mask || uc_update || mc_update) {
11050 vnic->rx_mask = mask;
11051
11052 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
c213eae8 11053 bnxt_queue_sp_work(bp);
c0c050c5
MC
11054 }
11055}
11056
b664f008 11057static int bnxt_cfg_rx_mode(struct bnxt *bp)
c0c050c5
MC
11058{
11059 struct net_device *dev = bp->dev;
11060 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
bbf33d1d 11061 struct hwrm_cfa_l2_filter_free_input *req;
c0c050c5
MC
11062 struct netdev_hw_addr *ha;
11063 int i, off = 0, rc;
11064 bool uc_update;
11065
11066 netif_addr_lock_bh(dev);
11067 uc_update = bnxt_uc_list_updated(bp);
11068 netif_addr_unlock_bh(dev);
11069
11070 if (!uc_update)
11071 goto skip_uc;
11072
bbf33d1d
EP
11073 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
11074 if (rc)
11075 return rc;
11076 hwrm_req_hold(bp, req);
c0c050c5 11077 for (i = 1; i < vnic->uc_filter_count; i++) {
bbf33d1d 11078 req->l2_filter_id = vnic->fw_l2_filter_id[i];
c0c050c5 11079
bbf33d1d 11080 rc = hwrm_req_send(bp, req);
c0c050c5 11081 }
bbf33d1d 11082 hwrm_req_drop(bp, req);
c0c050c5
MC
11083
11084 vnic->uc_filter_count = 1;
11085
11086 netif_addr_lock_bh(dev);
11087 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
11088 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11089 } else {
11090 netdev_for_each_uc_addr(ha, dev) {
11091 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
11092 off += ETH_ALEN;
11093 vnic->uc_filter_count++;
11094 }
11095 }
11096 netif_addr_unlock_bh(dev);
11097
11098 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
11099 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
11100 if (rc) {
662c9b22
EP
11101 if (BNXT_VF(bp) && rc == -ENODEV) {
11102 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11103 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
11104 else
11105 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
11106 rc = 0;
11107 } else {
11108 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11109 }
c0c050c5 11110 vnic->uc_filter_count = i;
b664f008 11111 return rc;
c0c050c5
MC
11112 }
11113 }
662c9b22
EP
11114 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11115 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
c0c050c5
MC
11116
11117skip_uc:
dd85fc0a
EP
11118 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
11119 !bnxt_promisc_ok(bp))
11120 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
c0c050c5 11121 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
8cdb1592 11122 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
b4e30e8e
MC
11123 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
11124 rc);
8cdb1592 11125 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
b4e30e8e
MC
11126 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11127 vnic->mc_list_count = 0;
11128 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
11129 }
c0c050c5 11130 if (rc)
b4e30e8e 11131 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
c0c050c5 11132 rc);
b664f008
MC
11133
11134 return rc;
c0c050c5
MC
11135}
11136
2773dfb2
MC
11137static bool bnxt_can_reserve_rings(struct bnxt *bp)
11138{
11139#ifdef CONFIG_BNXT_SRIOV
f1ca94de 11140 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
2773dfb2
MC
11141 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11142
11143 /* No minimum rings were provisioned by the PF. Don't
11144 * reserve rings by default when device is down.
11145 */
11146 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
11147 return true;
11148
11149 if (!netif_running(bp->dev))
11150 return false;
11151 }
11152#endif
11153 return true;
11154}
11155
8079e8f1
MC
11156/* If the chip and firmware support RFS */
11157static bool bnxt_rfs_supported(struct bnxt *bp)
11158{
e969ae5b 11159 if (bp->flags & BNXT_FLAG_CHIP_P5) {
41136ab3 11160 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
e969ae5b 11161 return true;
41e8d798 11162 return false;
e969ae5b 11163 }
976e52b7
MC
11164 /* 212 firmware is broken for aRFS */
11165 if (BNXT_FW_MAJ(bp) == 212)
11166 return false;
8079e8f1
MC
11167 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
11168 return true;
ae10ae74
MC
11169 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
11170 return true;
8079e8f1
MC
11171 return false;
11172}
11173
11174/* If runtime conditions support RFS */
2bcfa6f6
MC
11175static bool bnxt_rfs_capable(struct bnxt *bp)
11176{
11177#ifdef CONFIG_RFS_ACCEL
8079e8f1 11178 int vnics, max_vnics, max_rss_ctxs;
2bcfa6f6 11179
41e8d798 11180 if (bp->flags & BNXT_FLAG_CHIP_P5)
ac33906c 11181 return bnxt_rfs_supported(bp);
13ba7943 11182 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
2bcfa6f6
MC
11183 return false;
11184
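	/* One VNIC is needed per RX ring for aRFS steering, plus the
	 * default VNIC; e.g. 8 RX rings need 9 VNICs (and, unless
	 * BNXT_FLAG_NEW_RSS_CAP is set, 9 RSS contexts as well).
	 */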
11185 vnics = 1 + bp->rx_nr_rings;
8079e8f1
MC
11186 max_vnics = bnxt_get_max_func_vnics(bp);
11187 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
ae10ae74
MC
11188
11189 /* RSS contexts not a limiting factor */
11190 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
11191 max_rss_ctxs = max_vnics;
8079e8f1 11192 if (vnics > max_vnics || vnics > max_rss_ctxs) {
6a1eef5b
MC
11193 if (bp->rx_nr_rings > 1)
11194 netdev_warn(bp->dev,
11195 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
11196 min(max_rss_ctxs - 1, max_vnics - 1));
2bcfa6f6 11197 return false;
a2304909 11198 }
2bcfa6f6 11199
f1ca94de 11200 if (!BNXT_NEW_RM(bp))
6a1eef5b
MC
11201 return true;
11202
11203 if (vnics == bp->hw_resc.resv_vnics)
11204 return true;
11205
780baad4 11206 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
6a1eef5b
MC
11207 if (vnics <= bp->hw_resc.resv_vnics)
11208 return true;
11209
11210 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
780baad4 11211 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
6a1eef5b 11212 return false;
2bcfa6f6
MC
11213#else
11214 return false;
11215#endif
11216}
11217
c0c050c5
MC
11218static netdev_features_t bnxt_fix_features(struct net_device *dev,
11219 netdev_features_t features)
11220{
2bcfa6f6 11221 struct bnxt *bp = netdev_priv(dev);
c72cb303 11222 netdev_features_t vlan_features;
2bcfa6f6 11223
a2304909 11224 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
2bcfa6f6 11225 features &= ~NETIF_F_NTUPLE;
5a9f6b23 11226
366c3047 11227 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
1dc4c557
AG
11228 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11229
1054aee8
MC
11230 if (!(features & NETIF_F_GRO))
11231 features &= ~NETIF_F_GRO_HW;
11232
11233 if (features & NETIF_F_GRO_HW)
11234 features &= ~NETIF_F_LRO;
11235
5a9f6b23
MC
11236	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
11237 * turned on or off together.
11238 */
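	/* For example, if both CTAG and STAG RX acceleration were enabled
	 * and the request clears only one of them, both are cleared;
	 * conversely, requesting one when neither was enabled turns on both.
	 */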
a196e96b
EP
11239 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
11240 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
11241 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
11242 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
c72cb303 11243 else if (vlan_features)
a196e96b 11244 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
5a9f6b23 11245 }
cf6645f8 11246#ifdef CONFIG_BNXT_SRIOV
a196e96b
EP
11247 if (BNXT_VF(bp) && bp->vf.vlan)
11248 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
cf6645f8 11249#endif
c0c050c5
MC
11250 return features;
11251}
11252
11253static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
11254{
11255 struct bnxt *bp = netdev_priv(dev);
11256 u32 flags = bp->flags;
11257 u32 changes;
11258 int rc = 0;
11259 bool re_init = false;
11260 bool update_tpa = false;
11261
11262 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
1054aee8 11263 if (features & NETIF_F_GRO_HW)
c0c050c5 11264 flags |= BNXT_FLAG_GRO;
1054aee8 11265 else if (features & NETIF_F_LRO)
c0c050c5
MC
11266 flags |= BNXT_FLAG_LRO;
11267
bdbd1eb5
MC
11268 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
11269 flags &= ~BNXT_FLAG_TPA;
11270
a196e96b 11271 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
c0c050c5
MC
11272 flags |= BNXT_FLAG_STRIP_VLAN;
11273
11274 if (features & NETIF_F_NTUPLE)
11275 flags |= BNXT_FLAG_RFS;
11276
11277 changes = flags ^ bp->flags;
11278 if (changes & BNXT_FLAG_TPA) {
11279 update_tpa = true;
11280 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
f45b7b78
MC
11281 (flags & BNXT_FLAG_TPA) == 0 ||
11282 (bp->flags & BNXT_FLAG_CHIP_P5))
c0c050c5
MC
11283 re_init = true;
11284 }
11285
11286 if (changes & ~BNXT_FLAG_TPA)
11287 re_init = true;
11288
11289 if (flags != bp->flags) {
11290 u32 old_flags = bp->flags;
11291
2bcfa6f6 11292 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
f45b7b78 11293 bp->flags = flags;
c0c050c5
MC
11294 if (update_tpa)
11295 bnxt_set_ring_params(bp);
11296 return rc;
11297 }
11298
11299 if (re_init) {
11300 bnxt_close_nic(bp, false, false);
f45b7b78 11301 bp->flags = flags;
c0c050c5
MC
11302 if (update_tpa)
11303 bnxt_set_ring_params(bp);
11304
11305 return bnxt_open_nic(bp, false, false);
11306 }
11307 if (update_tpa) {
f45b7b78 11308 bp->flags = flags;
c0c050c5
MC
11309 rc = bnxt_set_tpa(bp,
11310 (flags & BNXT_FLAG_TPA) ?
11311 true : false);
11312 if (rc)
11313 bp->flags = old_flags;
11314 }
11315 }
11316 return rc;
11317}
11318
aa473d6c
MC
11319static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
11320 u8 **nextp)
11321{
11322 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
b6488b16 11323 struct hop_jumbo_hdr *jhdr;
aa473d6c
MC
11324 int hdr_count = 0;
11325 u8 *nexthdr;
11326 int start;
11327
11328 /* Check that there are at most 2 IPv6 extension headers, no
11329 * fragment header, and each is <= 64 bytes.
11330 */
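	/* For example, a packet whose only extension header is an 8-byte
	 * destination-options header passes this check, while a packet
	 * carrying a fragment header fails it and (via bnxt_features_check())
	 * loses checksum/GSO offload for this skb.
	 */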
11331 start = nw_off + sizeof(*ip6h);
11332 nexthdr = &ip6h->nexthdr;
11333 while (ipv6_ext_hdr(*nexthdr)) {
11334 struct ipv6_opt_hdr *hp;
11335 int hdrlen;
11336
11337 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
11338 *nexthdr == NEXTHDR_FRAGMENT)
11339 return false;
11340 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
11341 skb_headlen(skb), NULL);
11342 if (!hp)
11343 return false;
11344 if (*nexthdr == NEXTHDR_AUTH)
11345 hdrlen = ipv6_authlen(hp);
11346 else
11347 hdrlen = ipv6_optlen(hp);
11348
11349 if (hdrlen > 64)
11350 return false;
b6488b16
CL
11351
11352 /* The ext header may be a hop-by-hop header inserted for
11353	 * big TCP purposes. It will be removed before the packet is
11354	 * sent from the NIC, so do not count it.
11355 */
11356 if (*nexthdr == NEXTHDR_HOP) {
11357 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
11358 goto increment_hdr;
11359
11360 jhdr = (struct hop_jumbo_hdr *)hp;
11361 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
11362 jhdr->nexthdr != IPPROTO_TCP)
11363 goto increment_hdr;
11364
11365 goto next_hdr;
11366 }
11367increment_hdr:
11368 hdr_count++;
11369next_hdr:
aa473d6c
MC
11370 nexthdr = &hp->nexthdr;
11371 start += hdrlen;
aa473d6c
MC
11372 }
11373 if (nextp) {
11374 /* Caller will check inner protocol */
11375 if (skb->encapsulation) {
11376 *nextp = nexthdr;
11377 return true;
11378 }
11379 *nextp = NULL;
11380 }
11381 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
11382 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
11383}
11384
11385/* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
11386static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11387{
11388 struct udphdr *uh = udp_hdr(skb);
11389 __be16 udp_port = uh->dest;
11390
11391 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11392 return false;
11393 if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
11394 struct ethhdr *eh = inner_eth_hdr(skb);
11395
11396 switch (eh->h_proto) {
11397 case htons(ETH_P_IP):
11398 return true;
11399 case htons(ETH_P_IPV6):
11400 return bnxt_exthdr_check(bp, skb,
11401 skb_inner_network_offset(skb),
11402 NULL);
11403 }
11404 }
11405 return false;
11406}
11407
11408static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11409{
11410 switch (l4_proto) {
11411 case IPPROTO_UDP:
11412 return bnxt_udp_tunl_check(bp, skb);
11413 case IPPROTO_IPIP:
11414 return true;
11415 case IPPROTO_GRE: {
11416 switch (skb->inner_protocol) {
11417 default:
11418 return false;
11419 case htons(ETH_P_IP):
11420 return true;
11421 case htons(ETH_P_IPV6):
11422 fallthrough;
11423 }
11424 }
11425 case IPPROTO_IPV6:
11426 /* Check ext headers of inner ipv6 */
11427 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11428 NULL);
11429 }
11430 return false;
11431}
11432
1698d600
MC
11433static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11434 struct net_device *dev,
11435 netdev_features_t features)
11436{
aa473d6c
MC
11437 struct bnxt *bp = netdev_priv(dev);
11438 u8 *l4_proto;
1698d600
MC
11439
11440 features = vlan_features_check(skb, features);
1698d600
MC
11441 switch (vlan_get_protocol(skb)) {
11442 case htons(ETH_P_IP):
aa473d6c
MC
11443 if (!skb->encapsulation)
11444 return features;
11445 l4_proto = &ip_hdr(skb)->protocol;
11446 if (bnxt_tunl_check(bp, skb, *l4_proto))
11447 return features;
1698d600
MC
11448 break;
11449 case htons(ETH_P_IPV6):
aa473d6c
MC
11450 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11451 &l4_proto))
11452 break;
11453 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11454 return features;
1698d600 11455 break;
1698d600 11456 }
1698d600
MC
11457 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11458}
11459
b5d600b0
VV
11460int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11461 u32 *reg_buf)
11462{
bbf33d1d
EP
11463 struct hwrm_dbg_read_direct_output *resp;
11464 struct hwrm_dbg_read_direct_input *req;
b5d600b0
VV
11465 __le32 *dbg_reg_buf;
11466 dma_addr_t mapping;
11467 int rc, i;
11468
bbf33d1d
EP
11469 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
11470 if (rc)
11471 return rc;
11472
11473 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
11474 &mapping);
11475 if (!dbg_reg_buf) {
11476 rc = -ENOMEM;
11477 goto dbg_rd_reg_exit;
11478 }
11479
11480 req->host_dest_addr = cpu_to_le64(mapping);
11481
11482 resp = hwrm_req_hold(bp, req);
11483 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11484 req->read_len32 = cpu_to_le32(num_words);
11485
11486 rc = hwrm_req_send(bp, req);
b5d600b0
VV
11487 if (rc || resp->error_code) {
11488 rc = -EIO;
11489 goto dbg_rd_reg_exit;
11490 }
11491 for (i = 0; i < num_words; i++)
11492 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11493
11494dbg_rd_reg_exit:
bbf33d1d 11495 hwrm_req_drop(bp, req);
b5d600b0
VV
11496 return rc;
11497}
11498
ffd77621
MC
11499static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11500 u32 ring_id, u32 *prod, u32 *cons)
11501{
bbf33d1d
EP
11502 struct hwrm_dbg_ring_info_get_output *resp;
11503 struct hwrm_dbg_ring_info_get_input *req;
ffd77621
MC
11504 int rc;
11505
bbf33d1d
EP
11506 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
11507 if (rc)
11508 return rc;
11509
11510 req->ring_type = ring_type;
11511 req->fw_ring_id = cpu_to_le32(ring_id);
11512 resp = hwrm_req_hold(bp, req);
11513 rc = hwrm_req_send(bp, req);
ffd77621
MC
11514 if (!rc) {
11515 *prod = le32_to_cpu(resp->producer_index);
11516 *cons = le32_to_cpu(resp->consumer_index);
11517 }
bbf33d1d 11518 hwrm_req_drop(bp, req);
ffd77621
MC
11519 return rc;
11520}
11521
9f554590
MC
11522static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11523{
b6ab4b01 11524 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9f554590
MC
11525 int i = bnapi->index;
11526
3b2b7d9d
MC
11527 if (!txr)
11528 return;
11529
9f554590
MC
11530 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11531 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11532 txr->tx_cons);
11533}
11534
11535static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11536{
b6ab4b01 11537 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9f554590
MC
11538 int i = bnapi->index;
11539
3b2b7d9d
MC
11540 if (!rxr)
11541 return;
11542
9f554590
MC
11543 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11544 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11545 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11546 rxr->rx_sw_agg_prod);
11547}
11548
11549static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11550{
11551 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11552 int i = bnapi->index;
11553
11554 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11555 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11556}
11557
c0c050c5
MC
11558static void bnxt_dbg_dump_states(struct bnxt *bp)
11559{
11560 int i;
11561 struct bnxt_napi *bnapi;
c0c050c5
MC
11562
11563 for (i = 0; i < bp->cp_nr_rings; i++) {
11564 bnapi = bp->bnapi[i];
c0c050c5 11565 if (netif_msg_drv(bp)) {
9f554590
MC
11566 bnxt_dump_tx_sw_state(bnapi);
11567 bnxt_dump_rx_sw_state(bnapi);
11568 bnxt_dump_cp_sw_state(bnapi);
c0c050c5
MC
11569 }
11570 }
11571}
11572
8fbf58e1
MC
11573static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11574{
11575 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
bbf33d1d 11576 struct hwrm_ring_reset_input *req;
8fbf58e1
MC
11577 struct bnxt_napi *bnapi = rxr->bnapi;
11578 struct bnxt_cp_ring_info *cpr;
11579 u16 cp_ring_id;
bbf33d1d
EP
11580 int rc;
11581
11582 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
11583 if (rc)
11584 return rc;
8fbf58e1
MC
11585
11586 cpr = &bnapi->cp_ring;
11587 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
bbf33d1d
EP
11588 req->cmpl_ring = cpu_to_le16(cp_ring_id);
11589 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11590 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11591 return hwrm_req_send_silent(bp, req);
8fbf58e1
MC
11592}
11593
6988bd92 11594static void bnxt_reset_task(struct bnxt *bp, bool silent)
c0c050c5 11595{
6988bd92
MC
11596 if (!silent)
11597 bnxt_dbg_dump_states(bp);
028de140 11598 if (netif_running(bp->dev)) {
b386cd36
MC
11599 int rc;
11600
aa46dfff
VV
11601 if (silent) {
11602 bnxt_close_nic(bp, false, false);
11603 bnxt_open_nic(bp, false, false);
11604 } else {
b386cd36 11605 bnxt_ulp_stop(bp);
aa46dfff
VV
11606 bnxt_close_nic(bp, true, false);
11607 rc = bnxt_open_nic(bp, true, false);
11608 bnxt_ulp_start(bp, rc);
11609 }
028de140 11610 }
c0c050c5
MC
11611}
11612
0290bd29 11613static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
c0c050c5
MC
11614{
11615 struct bnxt *bp = netdev_priv(dev);
11616
11617 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11618 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
c213eae8 11619 bnxt_queue_sp_work(bp);
c0c050c5
MC
11620}
11621
acfb50e4
VV
11622static void bnxt_fw_health_check(struct bnxt *bp)
11623{
11624 struct bnxt_fw_health *fw_health = bp->fw_health;
11625 u32 val;
11626
0797c10d 11627 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
acfb50e4
VV
11628 return;
11629
1b2b9183
MC
11630 /* Make sure it is enabled before checking the tmr_counter. */
11631 smp_rmb();
acfb50e4
VV
11632 if (fw_health->tmr_counter) {
11633 fw_health->tmr_counter--;
11634 return;
11635 }
11636
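	/* The down-counter has expired, so sample the health registers:
	 * an unchanged heartbeat suggests the firmware has stalled, while
	 * a changed reset counter suggests it reset itself; either case
	 * queues the firmware-exception handling via the sp_event below.
	 */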
11637 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
8cc95ceb
EP
11638 if (val == fw_health->last_fw_heartbeat) {
11639 fw_health->arrests++;
acfb50e4 11640 goto fw_reset;
8cc95ceb 11641 }
acfb50e4
VV
11642
11643 fw_health->last_fw_heartbeat = val;
11644
11645 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
8cc95ceb
EP
11646 if (val != fw_health->last_fw_reset_cnt) {
11647 fw_health->discoveries++;
acfb50e4 11648 goto fw_reset;
8cc95ceb 11649 }
acfb50e4
VV
11650
11651 fw_health->tmr_counter = fw_health->tmr_multiplier;
11652 return;
11653
11654fw_reset:
11655 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11656 bnxt_queue_sp_work(bp);
11657}
11658
e99e88a9 11659static void bnxt_timer(struct timer_list *t)
c0c050c5 11660{
e99e88a9 11661 struct bnxt *bp = from_timer(bp, t, timer);
c0c050c5
MC
11662 struct net_device *dev = bp->dev;
11663
e0009404 11664 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
c0c050c5
MC
11665 return;
11666
11667 if (atomic_read(&bp->intr_sem) != 0)
11668 goto bnxt_restart_timer;
11669
acfb50e4
VV
11670 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11671 bnxt_fw_health_check(bp);
11672
0f5a4841 11673 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) {
3bdf56c4 11674 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
c213eae8 11675 bnxt_queue_sp_work(bp);
3bdf56c4 11676 }
5a84acbe
SP
11677
11678 if (bnxt_tc_flower_enabled(bp)) {
11679 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11680 bnxt_queue_sp_work(bp);
11681 }
a1ef4a79 11682
87d67f59
PC
11683#ifdef CONFIG_RFS_ACCEL
11684 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11685 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11686 bnxt_queue_sp_work(bp);
11687 }
11688#endif /*CONFIG_RFS_ACCEL*/
11689
a1ef4a79
MC
11690 if (bp->link_info.phy_retry) {
11691 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
acda6180 11692 bp->link_info.phy_retry = false;
a1ef4a79
MC
11693 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11694 } else {
11695 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11696 bnxt_queue_sp_work(bp);
11697 }
11698 }
ffd77621 11699
662c9b22
EP
11700 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) {
11701 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
11702 bnxt_queue_sp_work(bp);
11703 }
11704
5313845f
MC
11705 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11706 netif_carrier_ok(dev)) {
ffd77621
MC
11707 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11708 bnxt_queue_sp_work(bp);
11709 }
c0c050c5
MC
11710bnxt_restart_timer:
11711 mod_timer(&bp->timer, jiffies + bp->current_interval);
11712}
11713
a551ee94 11714static void bnxt_rtnl_lock_sp(struct bnxt *bp)
6988bd92 11715{
a551ee94
MC
11716 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11717 * set. If the device is being closed, bnxt_close() may be holding
6988bd92
MC
11718 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
11719 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
11720 */
11721 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11722 rtnl_lock();
a551ee94
MC
11723}
11724
11725static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11726{
6988bd92
MC
11727 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11728 rtnl_unlock();
11729}
11730
a551ee94
MC
11731/* Only called from bnxt_sp_task() */
11732static void bnxt_reset(struct bnxt *bp, bool silent)
11733{
11734 bnxt_rtnl_lock_sp(bp);
11735 if (test_bit(BNXT_STATE_OPEN, &bp->state))
11736 bnxt_reset_task(bp, silent);
11737 bnxt_rtnl_unlock_sp(bp);
11738}
11739
8fbf58e1
MC
11740/* Only called from bnxt_sp_task() */
11741static void bnxt_rx_ring_reset(struct bnxt *bp)
11742{
11743 int i;
11744
11745 bnxt_rtnl_lock_sp(bp);
11746 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11747 bnxt_rtnl_unlock_sp(bp);
11748 return;
11749 }
11750 /* Disable and flush TPA before resetting the RX ring */
11751 if (bp->flags & BNXT_FLAG_TPA)
11752 bnxt_set_tpa(bp, false);
11753 for (i = 0; i < bp->rx_nr_rings; i++) {
11754 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11755 struct bnxt_cp_ring_info *cpr;
11756 int rc;
11757
11758 if (!rxr->bnapi->in_reset)
11759 continue;
11760
11761 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11762 if (rc) {
11763 if (rc == -EINVAL || rc == -EOPNOTSUPP)
11764 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11765 else
11766 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11767 rc);
8fb35cd3 11768 bnxt_reset_task(bp, true);
8fbf58e1
MC
11769 break;
11770 }
11771 bnxt_free_one_rx_ring_skbs(bp, i);
11772 rxr->rx_prod = 0;
11773 rxr->rx_agg_prod = 0;
11774 rxr->rx_sw_agg_prod = 0;
11775 rxr->rx_next_cons = 0;
11776 rxr->bnapi->in_reset = false;
11777 bnxt_alloc_one_rx_ring(bp, i);
11778 cpr = &rxr->bnapi->cp_ring;
8a27d4b9 11779 cpr->sw_stats.rx.rx_resets++;
8fbf58e1
MC
11780 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11781 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11782 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11783 }
11784 if (bp->flags & BNXT_FLAG_TPA)
11785 bnxt_set_tpa(bp, true);
11786 bnxt_rtnl_unlock_sp(bp);
11787}
11788
230d1f0d
MC
11789static void bnxt_fw_reset_close(struct bnxt *bp)
11790{
f3a6d206 11791 bnxt_ulp_stop(bp);
4f036b2e
MC
11792 /* When firmware is in fatal state, quiesce device and disable
11793 * bus master to prevent any potential bad DMAs before freeing
11794 * kernel memory.
d4073028 11795 */
4f036b2e 11796 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
dab62e7c
MC
11797 u16 val = 0;
11798
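		/* An all-ones config read presumably means the device has
		 * already fallen off the bus, so there is no point honoring
		 * the minimum reset wait time.
		 */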
11799 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11800 if (val == 0xffff)
11801 bp->fw_reset_min_dsecs = 0;
4f036b2e
MC
11802 bnxt_tx_disable(bp);
11803 bnxt_disable_napi(bp);
11804 bnxt_disable_int_sync(bp);
11805 bnxt_free_irq(bp);
11806 bnxt_clear_int_mode(bp);
d4073028 11807 pci_disable_device(bp->pdev);
4f036b2e 11808 }
230d1f0d 11809 __bnxt_close_nic(bp, true, false);
ac797ced 11810 bnxt_vf_reps_free(bp);
230d1f0d
MC
11811 bnxt_clear_int_mode(bp);
11812 bnxt_hwrm_func_drv_unrgtr(bp);
d4073028
VV
11813 if (pci_is_enabled(bp->pdev))
11814 pci_disable_device(bp->pdev);
230d1f0d
MC
11815 bnxt_free_ctx_mem(bp);
11816 kfree(bp->ctx);
11817 bp->ctx = NULL;
11818}
11819
acfb50e4
VV
11820static bool is_bnxt_fw_ok(struct bnxt *bp)
11821{
11822 struct bnxt_fw_health *fw_health = bp->fw_health;
11823 bool no_heartbeat = false, has_reset = false;
11824 u32 val;
11825
11826 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11827 if (val == fw_health->last_fw_heartbeat)
11828 no_heartbeat = true;
11829
11830 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11831 if (val != fw_health->last_fw_reset_cnt)
11832 has_reset = true;
11833
11834 if (!no_heartbeat && has_reset)
11835 return true;
11836
11837 return false;
11838}
11839
d1db9e16
MC
11840/* rtnl_lock is acquired before calling this function */
11841static void bnxt_force_fw_reset(struct bnxt *bp)
11842{
11843 struct bnxt_fw_health *fw_health = bp->fw_health;
30e96f48 11844 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
d1db9e16
MC
11845 u32 wait_dsecs;
11846
11847 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11848 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11849 return;
11850
30e96f48
MC
11851 if (ptp) {
11852 spin_lock_bh(&ptp->ptp_lock);
11853 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11854 spin_unlock_bh(&ptp->ptp_lock);
11855 } else {
11856 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11857 }
d1db9e16
MC
11858 bnxt_fw_reset_close(bp);
11859 wait_dsecs = fw_health->master_func_wait_dsecs;
1596847d 11860 if (fw_health->primary) {
d1db9e16
MC
11861 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11862 wait_dsecs = 0;
11863 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11864 } else {
11865 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11866 wait_dsecs = fw_health->normal_func_wait_dsecs;
11867 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11868 }
4037eb71
VV
11869
11870 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
d1db9e16
MC
11871 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11872 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11873}
11874
11875void bnxt_fw_exception(struct bnxt *bp)
11876{
a2b31e27 11877 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
d1db9e16
MC
11878 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11879 bnxt_rtnl_lock_sp(bp);
11880 bnxt_force_fw_reset(bp);
11881 bnxt_rtnl_unlock_sp(bp);
11882}
11883
e72cb7d6
MC
11884/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11885 * < 0 on error.
11886 */
11887static int bnxt_get_registered_vfs(struct bnxt *bp)
230d1f0d 11888{
e72cb7d6 11889#ifdef CONFIG_BNXT_SRIOV
230d1f0d
MC
11890 int rc;
11891
e72cb7d6
MC
11892 if (!BNXT_PF(bp))
11893 return 0;
11894
11895 rc = bnxt_hwrm_func_qcfg(bp);
11896 if (rc) {
11897 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11898 return rc;
11899 }
11900 if (bp->pf.registered_vfs)
11901 return bp->pf.registered_vfs;
11902 if (bp->sriov_cfg)
11903 return 1;
11904#endif
11905 return 0;
11906}
11907
11908void bnxt_fw_reset(struct bnxt *bp)
11909{
230d1f0d
MC
11910 bnxt_rtnl_lock_sp(bp);
11911 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11912 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
30e96f48 11913 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
4037eb71 11914 int n = 0, tmo;
e72cb7d6 11915
30e96f48
MC
11916 if (ptp) {
11917 spin_lock_bh(&ptp->ptp_lock);
11918 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11919 spin_unlock_bh(&ptp->ptp_lock);
11920 } else {
11921 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11922 }
e72cb7d6
MC
11923 if (bp->pf.active_vfs &&
11924 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11925 n = bnxt_get_registered_vfs(bp);
11926 if (n < 0) {
11927 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11928 n);
11929 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11930 dev_close(bp->dev);
11931 goto fw_reset_exit;
11932 } else if (n > 0) {
11933 u16 vf_tmo_dsecs = n * 10;
11934
11935 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11936 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11937 bp->fw_reset_state =
11938 BNXT_FW_RESET_STATE_POLL_VF;
11939 bnxt_queue_fw_reset_work(bp, HZ / 10);
11940 goto fw_reset_exit;
230d1f0d
MC
11941 }
11942 bnxt_fw_reset_close(bp);
4037eb71
VV
11943 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11944 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11945 tmo = HZ / 10;
11946 } else {
11947 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11948 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11949 }
11950 bnxt_queue_fw_reset_work(bp, tmo);
230d1f0d
MC
11951 }
11952fw_reset_exit:
11953 bnxt_rtnl_unlock_sp(bp);
11954}
11955
ffd77621
MC
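/* On P5 chips, look for completion rings that have pending work but whose
 * consumer index has not advanced since the last check, query the ring
 * state from firmware, and count the event as a missed interrupt.
 */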
11956static void bnxt_chk_missed_irq(struct bnxt *bp)
11957{
11958 int i;
11959
11960 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11961 return;
11962
11963 for (i = 0; i < bp->cp_nr_rings; i++) {
11964 struct bnxt_napi *bnapi = bp->bnapi[i];
11965 struct bnxt_cp_ring_info *cpr;
11966 u32 fw_ring_id;
11967 int j;
11968
11969 if (!bnapi)
11970 continue;
11971
11972 cpr = &bnapi->cp_ring;
11973 for (j = 0; j < 2; j++) {
11974 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11975 u32 val[2];
11976
11977 if (!cpr2 || cpr2->has_more_work ||
11978 !bnxt_has_work(bp, cpr2))
11979 continue;
11980
11981 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11982 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11983 continue;
11984 }
11985 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11986 bnxt_dbg_hwrm_ring_info_get(bp,
11987 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11988 fw_ring_id, &val[0], &val[1]);
9d8b5f05 11989 cpr->sw_stats.cmn.missed_irqs++;
ffd77621
MC
11990 }
11991 }
11992}
11993
c0c050c5
MC
11994static void bnxt_cfg_ntp_filters(struct bnxt *);
11995
8119e49b
MC
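/* Derive the requested link settings (autoneg, speed, signaling mode and
 * flow control) from the current firmware link configuration.
 */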
11996static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11997{
11998 struct bnxt_link_info *link_info = &bp->link_info;
11999
12000 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
12001 link_info->autoneg = BNXT_AUTONEG_SPEED;
12002 if (bp->hwrm_spec_code >= 0x10201) {
12003 if (link_info->auto_pause_setting &
12004 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
12005 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
12006 } else {
12007 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
12008 }
12009 link_info->advertising = link_info->auto_link_speeds;
d058426e 12010 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
8119e49b
MC
12011 } else {
12012 link_info->req_link_speed = link_info->force_link_speed;
d058426e
EP
12013 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
12014 if (link_info->force_pam4_link_speed) {
12015 link_info->req_link_speed =
12016 link_info->force_pam4_link_speed;
12017 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
12018 }
8119e49b
MC
12019 link_info->req_duplex = link_info->duplex_setting;
12020 }
12021 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
12022 link_info->req_flow_ctrl =
12023 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
12024 else
12025 link_info->req_flow_ctrl = link_info->force_pause_setting;
12026}
12027
df97b34d
MC
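/* Answer a firmware echo request by sending HWRM_FUNC_ECHO_RESPONSE with
 * the data words saved from the echo request event.
 */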
12028static void bnxt_fw_echo_reply(struct bnxt *bp)
12029{
12030 struct bnxt_fw_health *fw_health = bp->fw_health;
bbf33d1d
EP
12031 struct hwrm_func_echo_response_input *req;
12032 int rc;
df97b34d 12033
bbf33d1d
EP
12034 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
12035 if (rc)
12036 return;
12037 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
12038 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
12039 hwrm_req_send(bp, req);
df97b34d
MC
12040}
12041
c0c050c5
MC
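/* Slow path task: handle deferred events flagged in bp->sp_event (rx mode
 * updates, ntuple filters, stats, link changes, resets) in process context.
 */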
12042static void bnxt_sp_task(struct work_struct *work)
12043{
12044 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
c0c050c5 12045
4cebdcec
MC
12046 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12047 smp_mb__after_atomic();
12048 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12049 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5 12050 return;
4cebdcec 12051 }
c0c050c5
MC
12052
12053 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
12054 bnxt_cfg_rx_mode(bp);
12055
12056 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
12057 bnxt_cfg_ntp_filters(bp);
c0c050c5
MC
12058 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
12059 bnxt_hwrm_exec_fwd_req(bp);
00db3cba 12060 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
531d1d26
MC
12061 bnxt_hwrm_port_qstats(bp, 0);
12062 bnxt_hwrm_port_qstats_ext(bp, 0);
fea6b333 12063 bnxt_accumulate_all_stats(bp);
00db3cba 12064 }
3bdf56c4 12065
0eaa24b9 12066 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
e2dc9b6e 12067 int rc;
0eaa24b9 12068
e2dc9b6e 12069 mutex_lock(&bp->link_lock);
0eaa24b9
MC
12070 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
12071 &bp->sp_event))
12072 bnxt_hwrm_phy_qcaps(bp);
12073
e2dc9b6e 12074 rc = bnxt_update_link(bp, true);
0eaa24b9
MC
12075 if (rc)
12076 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
12077 rc);
ca0c7538
VV
12078
12079 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
12080 &bp->sp_event))
12081 bnxt_init_ethtool_link_settings(bp);
12082 mutex_unlock(&bp->link_lock);
0eaa24b9 12083 }
a1ef4a79
MC
12084 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
12085 int rc;
12086
12087 mutex_lock(&bp->link_lock);
12088 rc = bnxt_update_phy_setting(bp);
12089 mutex_unlock(&bp->link_lock);
12090 if (rc) {
12091 netdev_warn(bp->dev, "update phy settings retry failed\n");
12092 } else {
12093 bp->link_info.phy_retry = false;
12094 netdev_info(bp->dev, "update phy settings retry succeeded\n");
12095 }
12096 }
90c694bb 12097 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
e2dc9b6e
MC
12098 mutex_lock(&bp->link_lock);
12099 bnxt_get_port_module_status(bp);
12100 mutex_unlock(&bp->link_lock);
90c694bb 12101 }
5a84acbe
SP
12102
12103 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
12104 bnxt_tc_flow_stats_work(bp);
12105
ffd77621
MC
12106 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
12107 bnxt_chk_missed_irq(bp);
12108
df97b34d
MC
12109 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
12110 bnxt_fw_echo_reply(bp);
12111
e2dc9b6e
MC
12112 /* The functions below will clear BNXT_STATE_IN_SP_TASK. They
12113 * must be the last functions called before exiting.
12114 */
6988bd92
MC
12115 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
12116 bnxt_reset(bp, false);
4cebdcec 12117
fc0f1929
MC
12118 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
12119 bnxt_reset(bp, true);
12120
8fbf58e1
MC
12121 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
12122 bnxt_rx_ring_reset(bp);
12123
aadb0b1a
EP
12124 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
12125 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
12126 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
12127 bnxt_devlink_health_fw_report(bp);
12128 else
12129 bnxt_fw_reset(bp);
12130 }
657a33c8 12131
acfb50e4
VV
12132 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
12133 if (!is_bnxt_fw_ok(bp))
aadb0b1a 12134 bnxt_devlink_health_fw_report(bp);
acfb50e4
VV
12135 }
12136
4cebdcec
MC
12137 smp_mb__before_atomic();
12138 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5
MC
12139}
12140
d1e7925e 12141/* Under rtnl_lock */
98fdbe73
MC
12142int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
12143 int tx_xdp)
d1e7925e
MC
12144{
12145 int max_rx, max_tx, tx_sets = 1;
780baad4 12146 int tx_rings_needed, stats;
8f23d638 12147 int rx_rings = rx;
6fc2ffdf 12148 int cp, vnics, rc;
d1e7925e 12149
d1e7925e
MC
12150 if (tcs)
12151 tx_sets = tcs;
12152
12153 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
12154 if (rc)
12155 return rc;
12156
12157 if (max_rx < rx)
12158 return -ENOMEM;
12159
5f449249 12160 tx_rings_needed = tx * tx_sets + tx_xdp;
d1e7925e
MC
12161 if (max_tx < tx_rings_needed)
12162 return -ENOMEM;
12163
6fc2ffdf 12164 vnics = 1;
9b3d15e6 12165 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
6fc2ffdf
EW
12166 vnics += rx_rings;
12167
8f23d638
MC
12168 if (bp->flags & BNXT_FLAG_AGG_RINGS)
12169 rx_rings <<= 1;
12170 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
780baad4
VV
12171 stats = cp;
12172 if (BNXT_NEW_RM(bp)) {
11c3ec7b 12173 cp += bnxt_get_ulp_msix_num(bp);
780baad4
VV
12174 stats += bnxt_get_ulp_stat_ctxs(bp);
12175 }
6fc2ffdf 12176 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
780baad4 12177 stats, vnics);
d1e7925e
MC
12178}
12179
17086399
SP
12180static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
12181{
12182 if (bp->bar2) {
12183 pci_iounmap(pdev, bp->bar2);
12184 bp->bar2 = NULL;
12185 }
12186
12187 if (bp->bar1) {
12188 pci_iounmap(pdev, bp->bar1);
12189 bp->bar1 = NULL;
12190 }
12191
12192 if (bp->bar0) {
12193 pci_iounmap(pdev, bp->bar0);
12194 bp->bar0 = NULL;
12195 }
12196}
12197
12198static void bnxt_cleanup_pci(struct bnxt *bp)
12199{
12200 bnxt_unmap_bars(bp, bp->pdev);
12201 pci_release_regions(bp->pdev);
f6824308
VV
12202 if (pci_is_enabled(bp->pdev))
12203 pci_disable_device(bp->pdev);
17086399
SP
12204}
12205
18775aa8
MC
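/* Set the default interrupt coalescing parameters for the RX and TX rings. */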
12206static void bnxt_init_dflt_coal(struct bnxt *bp)
12207{
df78ea22 12208 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
18775aa8 12209 struct bnxt_coal *coal;
df78ea22
MC
12210 u16 flags = 0;
12211
12212 if (coal_cap->cmpl_params &
12213 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
12214 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
18775aa8
MC
12215
12216 /* Tick values in microseconds.
12217 * 1 coal_buf x bufs_per_record = 1 completion record.
12218 */
12219 coal = &bp->rx_coal;
0c2ff8d7 12220 coal->coal_ticks = 10;
18775aa8
MC
12221 coal->coal_bufs = 30;
12222 coal->coal_ticks_irq = 1;
12223 coal->coal_bufs_irq = 2;
05abe4dd 12224 coal->idle_thresh = 50;
18775aa8
MC
12225 coal->bufs_per_record = 2;
12226 coal->budget = 64; /* NAPI budget */
df78ea22 12227 coal->flags = flags;
18775aa8
MC
12228
12229 coal = &bp->tx_coal;
12230 coal->coal_ticks = 28;
12231 coal->coal_bufs = 30;
12232 coal->coal_ticks_irq = 2;
12233 coal->coal_bufs_irq = 2;
12234 coal->bufs_per_record = 1;
df78ea22 12235 coal->flags = flags;
18775aa8
MC
12236
12237 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
12238}
12239
7c380918
MC
12240static int bnxt_fw_init_one_p1(struct bnxt *bp)
12241{
12242 int rc;
12243
12244 bp->fw_cap = 0;
12245 rc = bnxt_hwrm_ver_get(bp);
ba02629f
EP
12246 bnxt_try_map_fw_health_reg(bp);
12247 if (rc) {
b187e4ba
EP
12248 rc = bnxt_try_recover_fw(bp);
12249 if (rc)
12250 return rc;
12251 rc = bnxt_hwrm_ver_get(bp);
87f7ab8d
EP
12252 if (rc)
12253 return rc;
ba02629f 12254 }
7c380918 12255
4933f675
VV
12256 bnxt_nvm_cfg_ver_get(bp);
12257
7c380918
MC
12258 rc = bnxt_hwrm_func_reset(bp);
12259 if (rc)
12260 return -ENODEV;
12261
12262 bnxt_hwrm_fw_set_time(bp);
12263 return 0;
12264}
12265
12266static int bnxt_fw_init_one_p2(struct bnxt *bp)
12267{
12268 int rc;
12269
12270 /* Get the MAX capabilities for this function */
12271 rc = bnxt_hwrm_func_qcaps(bp);
12272 if (rc) {
12273 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
12274 rc);
12275 return -ENODEV;
12276 }
12277
12278 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
12279 if (rc)
12280 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
12281 rc);
12282
3e9ec2bb
EP
12283 if (bnxt_alloc_fw_health(bp)) {
12284 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
12285 } else {
12286 rc = bnxt_hwrm_error_recovery_qcfg(bp);
12287 if (rc)
12288 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
12289 rc);
12290 }
07f83d72 12291
2e882468 12292 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
7c380918
MC
12293 if (rc)
12294 return -ENODEV;
12295
12296 bnxt_hwrm_func_qcfg(bp);
12297 bnxt_hwrm_vnic_qcaps(bp);
12298 bnxt_hwrm_port_led_qcaps(bp);
12299 bnxt_ethtool_init(bp);
12300 bnxt_dcb_init(bp);
12301 return 0;
12302}
12303
ba642ab7
MC
12304static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
12305{
12306 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
12307 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
12308 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
12309 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
12310 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
98a4322b
EP
12311 if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
12312 bp->rss_hash_delta = bp->rss_hash_cfg;
c66c06c5 12313 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
ba642ab7
MC
12314 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
12315 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
12316 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
12317 }
12318}
12319
12320static void bnxt_set_dflt_rfs(struct bnxt *bp)
12321{
12322 struct net_device *dev = bp->dev;
12323
12324 dev->hw_features &= ~NETIF_F_NTUPLE;
12325 dev->features &= ~NETIF_F_NTUPLE;
12326 bp->flags &= ~BNXT_FLAG_RFS;
12327 if (bnxt_rfs_supported(bp)) {
12328 dev->hw_features |= NETIF_F_NTUPLE;
12329 if (bnxt_rfs_capable(bp)) {
12330 bp->flags |= BNXT_FLAG_RFS;
12331 dev->features |= NETIF_F_NTUPLE;
12332 }
12333 }
12334}
12335
12336static void bnxt_fw_init_one_p3(struct bnxt *bp)
12337{
12338 struct pci_dev *pdev = bp->pdev;
12339
12340 bnxt_set_dflt_rss_hash_type(bp);
12341 bnxt_set_dflt_rfs(bp);
12342
12343 bnxt_get_wol_settings(bp);
12344 if (bp->flags & BNXT_FLAG_WOL_CAP)
12345 device_set_wakeup_enable(&pdev->dev, bp->wol);
12346 else
12347 device_set_wakeup_capable(&pdev->dev, false);
12348
12349 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
12350 bnxt_hwrm_coal_params_qcaps(bp);
12351}
12352
0afd6a4e
MC
12353static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
12354
228ea8c1 12355int bnxt_fw_init_one(struct bnxt *bp)
ec5d31e3
MC
12356{
12357 int rc;
12358
12359 rc = bnxt_fw_init_one_p1(bp);
12360 if (rc) {
12361 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
12362 return rc;
12363 }
12364 rc = bnxt_fw_init_one_p2(bp);
12365 if (rc) {
12366 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
12367 return rc;
12368 }
0afd6a4e
MC
12369 rc = bnxt_probe_phy(bp, false);
12370 if (rc)
12371 return rc;
ec5d31e3
MC
12372 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
12373 if (rc)
12374 return rc;
937f188c 12375
ec5d31e3
MC
12376 bnxt_fw_init_one_p3(bp);
12377 return 0;
12378}
12379
cbb51067
MC
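/* Execute one step of the firmware reset sequence: write the value for this
 * step to a config space, GRC, or BAR register, then apply the optional
 * per-step delay.
 */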
12380static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
12381{
12382 struct bnxt_fw_health *fw_health = bp->fw_health;
12383 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
12384 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
12385 u32 reg_type, reg_off, delay_msecs;
12386
12387 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
12388 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
12389 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
12390 switch (reg_type) {
12391 case BNXT_FW_HEALTH_REG_TYPE_CFG:
12392 pci_write_config_dword(bp->pdev, reg_off, val);
12393 break;
12394 case BNXT_FW_HEALTH_REG_TYPE_GRC:
12395 writel(reg_off & BNXT_GRC_BASE_MASK,
12396 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
12397 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
df561f66 12398 fallthrough;
cbb51067
MC
12399 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
12400 writel(val, bp->bar0 + reg_off);
12401 break;
12402 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
12403 writel(val, bp->bar1 + reg_off);
12404 break;
12405 }
12406 if (delay_msecs) {
12407 pci_read_config_dword(bp->pdev, 0, &val);
12408 msleep(delay_msecs);
12409 }
12410}
12411
892a662f
EP
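/* Query firmware (HWRM_FUNC_QCFG) to check whether a hot reset is currently
 * allowed.  Default to true if the capability or the query is unavailable
 * and let firmware enforce the policy.
 */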
12412bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
12413{
12414 struct hwrm_func_qcfg_output *resp;
12415 struct hwrm_func_qcfg_input *req;
12416 bool result = true; /* firmware will enforce if unknown */
12417
12418 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
12419 return result;
12420
12421 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
12422 return result;
12423
12424 req->fid = cpu_to_le16(0xffff);
12425 resp = hwrm_req_hold(bp, req);
12426 if (!hwrm_req_send(bp, req))
12427 result = !!(le16_to_cpu(resp->flags) &
12428 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
12429 hwrm_req_drop(bp, req);
12430 return result;
12431}
12432
cbb51067
MC
12433static void bnxt_reset_all(struct bnxt *bp)
12434{
12435 struct bnxt_fw_health *fw_health = bp->fw_health;
e07ab202
VV
12436 int i, rc;
12437
12438 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
87f7ab8d 12439 bnxt_fw_reset_via_optee(bp);
e07ab202 12440 bp->fw_reset_timestamp = jiffies;
e07ab202
VV
12441 return;
12442 }
cbb51067
MC
12443
12444 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
12445 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
12446 bnxt_fw_reset_writel(bp, i);
12447 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
bbf33d1d
EP
12448 struct hwrm_fw_reset_input *req;
12449
12450 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
12451 if (!rc) {
12452 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
12453 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
12454 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
12455 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
12456 rc = hwrm_req_send(bp, req);
12457 }
a2f3835c 12458 if (rc != -ENODEV)
cbb51067
MC
12459 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12460 }
12461 bp->fw_reset_timestamp = jiffies;
12462}
12463
339eeb4b
MC
12464static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12465{
12466 return time_after(jiffies, bp->fw_reset_timestamp +
12467 (bp->fw_reset_max_dsecs * HZ / 10));
12468}
12469
3958b1da
SK
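/* Abort an in-progress firmware reset: clear the reset state, restart the
 * ULP driver and update the health status where appropriate, and close the
 * device.
 */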
12470static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12471{
12472 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12473 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12474 bnxt_ulp_start(bp, rc);
aadb0b1a 12475 bnxt_dl_health_fw_status_update(bp, false);
3958b1da
SK
12476 }
12477 bp->fw_reset_state = 0;
12478 dev_close(bp->dev);
12479}
12480
230d1f0d
MC
12481static void bnxt_fw_reset_task(struct work_struct *work)
12482{
12483 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
3958b1da 12484 int rc = 0;
230d1f0d
MC
12485
12486 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12487 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12488 return;
12489 }
12490
12491 switch (bp->fw_reset_state) {
e72cb7d6
MC
12492 case BNXT_FW_RESET_STATE_POLL_VF: {
12493 int n = bnxt_get_registered_vfs(bp);
4037eb71 12494 int tmo;
e72cb7d6
MC
12495
12496 if (n < 0) {
230d1f0d 12497 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
e72cb7d6 12498 n, jiffies_to_msecs(jiffies -
230d1f0d
MC
12499 bp->fw_reset_timestamp));
12500 goto fw_reset_abort;
e72cb7d6 12501 } else if (n > 0) {
339eeb4b 12502 if (bnxt_fw_reset_timeout(bp)) {
230d1f0d
MC
12503 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12504 bp->fw_reset_state = 0;
e72cb7d6
MC
12505 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12506 n);
230d1f0d
MC
12507 return;
12508 }
12509 bnxt_queue_fw_reset_work(bp, HZ / 10);
12510 return;
12511 }
12512 bp->fw_reset_timestamp = jiffies;
12513 rtnl_lock();
6cd657cb 12514 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
3958b1da 12515 bnxt_fw_reset_abort(bp, rc);
6cd657cb 12516 rtnl_unlock();
3958b1da 12517 return;
6cd657cb 12518 }
230d1f0d 12519 bnxt_fw_reset_close(bp);
4037eb71
VV
12520 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12521 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12522 tmo = HZ / 10;
12523 } else {
12524 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12525 tmo = bp->fw_reset_min_dsecs * HZ / 10;
12526 }
230d1f0d 12527 rtnl_unlock();
4037eb71 12528 bnxt_queue_fw_reset_work(bp, tmo);
230d1f0d 12529 return;
e72cb7d6 12530 }
4037eb71
VV
12531 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12532 u32 val;
12533
12534 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12535 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
339eeb4b 12536 !bnxt_fw_reset_timeout(bp)) {
4037eb71
VV
12537 bnxt_queue_fw_reset_work(bp, HZ / 5);
12538 return;
12539 }
12540
1596847d 12541 if (!bp->fw_health->primary) {
4037eb71
VV
12542 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12543
12544 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12545 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12546 return;
12547 }
12548 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12549 }
df561f66 12550 fallthrough;
c6a9e7aa 12551 case BNXT_FW_RESET_STATE_RESET_FW:
cbb51067
MC
12552 bnxt_reset_all(bp);
12553 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
c6a9e7aa 12554 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
cbb51067 12555 return;
230d1f0d 12556 case BNXT_FW_RESET_STATE_ENABLE_DEV:
43a440c4 12557 bnxt_inv_fw_health_reg(bp);
bae8a003
VV
12558 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12559 !bp->fw_reset_min_dsecs) {
12560 u16 val;
12561
12562 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12563 if (val == 0xffff) {
12564 if (bnxt_fw_reset_timeout(bp)) {
12565 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
3958b1da 12566 rc = -ETIMEDOUT;
bae8a003 12567 goto fw_reset_abort;
dab62e7c 12568 }
bae8a003
VV
12569 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12570 return;
dab62e7c 12571 }
d1db9e16 12572 }
b4fff207 12573 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
aadb0b1a 12574 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
8f6c5e4d
EP
12575 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
12576 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
12577 bnxt_dl_remote_reload(bp);
230d1f0d
MC
12578 if (pci_enable_device(bp->pdev)) {
12579 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
3958b1da 12580 rc = -ENODEV;
230d1f0d
MC
12581 goto fw_reset_abort;
12582 }
12583 pci_set_master(bp->pdev);
12584 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
df561f66 12585 fallthrough;
230d1f0d
MC
12586 case BNXT_FW_RESET_STATE_POLL_FW:
12587 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
7b370ad7 12588 rc = bnxt_hwrm_poll(bp);
230d1f0d 12589 if (rc) {
339eeb4b 12590 if (bnxt_fw_reset_timeout(bp)) {
230d1f0d 12591 netdev_err(bp->dev, "Firmware reset aborted\n");
fc8864e0 12592 goto fw_reset_abort_status;
230d1f0d
MC
12593 }
12594 bnxt_queue_fw_reset_work(bp, HZ / 5);
12595 return;
12596 }
12597 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12598 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
df561f66 12599 fallthrough;
230d1f0d
MC
12600 case BNXT_FW_RESET_STATE_OPENING:
12601 while (!rtnl_trylock()) {
12602 bnxt_queue_fw_reset_work(bp, HZ / 10);
12603 return;
12604 }
12605 rc = bnxt_open(bp->dev);
12606 if (rc) {
3958b1da
SK
12607 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12608 bnxt_fw_reset_abort(bp, rc);
12609 rtnl_unlock();
12610 return;
230d1f0d 12611 }
230d1f0d 12612
eca4cf12
MC
12613 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
12614 bp->fw_health->enabled) {
12615 bp->fw_health->last_fw_reset_cnt =
12616 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
12617 }
230d1f0d
MC
12618 bp->fw_reset_state = 0;
12619 /* Make sure fw_reset_state is 0 before clearing the flag */
12620 smp_mb__before_atomic();
12621 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
758684e4
SK
12622 bnxt_ulp_start(bp, 0);
12623 bnxt_reenable_sriov(bp);
ac797ced
SB
12624 bnxt_vf_reps_alloc(bp);
12625 bnxt_vf_reps_open(bp);
9e518f25 12626 bnxt_ptp_reapply_pps(bp);
8f6c5e4d 12627 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
aadb0b1a
EP
12628 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
12629 bnxt_dl_health_fw_recovery_done(bp);
12630 bnxt_dl_health_fw_status_update(bp, true);
12631 }
f3a6d206 12632 rtnl_unlock();
230d1f0d
MC
12633 break;
12634 }
12635 return;
12636
fc8864e0
MC
12637fw_reset_abort_status:
12638 if (bp->fw_health->status_reliable ||
12639 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12640 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12641
12642 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12643 }
230d1f0d 12644fw_reset_abort:
230d1f0d 12645 rtnl_lock();
3958b1da 12646 bnxt_fw_reset_abort(bp, rc);
230d1f0d
MC
12647 rtnl_unlock();
12648}
12649
c0c050c5
MC
12650static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12651{
12652 int rc;
12653 struct bnxt *bp = netdev_priv(dev);
12654
12655 SET_NETDEV_DEV(dev, &pdev->dev);
12656
12657 /* enable device (incl. PCI PM wakeup), and bus-mastering */
12658 rc = pci_enable_device(pdev);
12659 if (rc) {
12660 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12661 goto init_err;
12662 }
12663
12664 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12665 dev_err(&pdev->dev,
12666 "Cannot find PCI device base address, aborting\n");
12667 rc = -ENODEV;
12668 goto init_err_disable;
12669 }
12670
12671 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12672 if (rc) {
12673 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12674 goto init_err_disable;
12675 }
12676
12677 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12678 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12679 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
3383176e 12680 rc = -EIO;
c54bc3ce 12681 goto init_err_release;
c0c050c5
MC
12682 }
12683
12684 pci_set_master(pdev);
12685
12686 bp->dev = dev;
12687 bp->pdev = pdev;
12688
8ae24738
MC
12689 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12690 * determines the BAR size.
12691 */
c0c050c5
MC
12692 bp->bar0 = pci_ioremap_bar(pdev, 0);
12693 if (!bp->bar0) {
12694 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12695 rc = -ENOMEM;
12696 goto init_err_release;
12697 }
12698
c0c050c5
MC
12699 bp->bar2 = pci_ioremap_bar(pdev, 4);
12700 if (!bp->bar2) {
12701 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12702 rc = -ENOMEM;
12703 goto init_err_release;
12704 }
12705
6316ea6d
SB
12706 pci_enable_pcie_error_reporting(pdev);
12707
c0c050c5 12708 INIT_WORK(&bp->sp_task, bnxt_sp_task);
230d1f0d 12709 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
c0c050c5
MC
12710
12711 spin_lock_init(&bp->ntp_fltr_lock);
697197e5
MC
12712#if BITS_PER_LONG == 32
12713 spin_lock_init(&bp->db_lock);
12714#endif
c0c050c5
MC
12715
12716 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12717 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12718
e99e88a9 12719 timer_setup(&bp->timer, bnxt_timer, 0);
c0c050c5
MC
12720 bp->current_interval = BNXT_TIMER_INTERVAL;
12721
442a35a5
JK
12722 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12723 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12724
caefe526 12725 clear_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
12726 return 0;
12727
12728init_err_release:
17086399 12729 bnxt_unmap_bars(bp, pdev);
c0c050c5
MC
12730 pci_release_regions(pdev);
12731
12732init_err_disable:
12733 pci_disable_device(pdev);
12734
12735init_err:
12736 return rc;
12737}
12738
12739/* rtnl_lock held */
12740static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12741{
12742 struct sockaddr *addr = p;
1fc2cfd0
JH
12743 struct bnxt *bp = netdev_priv(dev);
12744 int rc = 0;
c0c050c5
MC
12745
12746 if (!is_valid_ether_addr(addr->sa_data))
12747 return -EADDRNOTAVAIL;
12748
c1a7bdff
MC
12749 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12750 return 0;
12751
28ea334b 12752 rc = bnxt_approve_mac(bp, addr->sa_data, true);
84c33dd3
MC
12753 if (rc)
12754 return rc;
bdd4347b 12755
a05e4c0a 12756 eth_hw_addr_set(dev, addr->sa_data);
1fc2cfd0
JH
12757 if (netif_running(dev)) {
12758 bnxt_close_nic(bp, false, false);
12759 rc = bnxt_open_nic(bp, false, false);
12760 }
c0c050c5 12761
1fc2cfd0 12762 return rc;
c0c050c5
MC
12763}
12764
12765/* rtnl_lock held */
12766static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12767{
12768 struct bnxt *bp = netdev_priv(dev);
12769
c0c050c5 12770 if (netif_running(dev))
a9b952d2 12771 bnxt_close_nic(bp, true, false);
c0c050c5
MC
12772
12773 dev->mtu = new_mtu;
12774 bnxt_set_ring_params(bp);
12775
12776 if (netif_running(dev))
a9b952d2 12777 return bnxt_open_nic(bp, true, false);
c0c050c5
MC
12778
12779 return 0;
12780}
12781
c5e3deb8 12782int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
c0c050c5
MC
12783{
12784 struct bnxt *bp = netdev_priv(dev);
3ffb6a39 12785 bool sh = false;
d1e7925e 12786 int rc;
16e5cc64 12787
c0c050c5 12788 if (tc > bp->max_tc) {
b451c8b6 12789 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
c0c050c5
MC
12790 tc, bp->max_tc);
12791 return -EINVAL;
12792 }
12793
12794 if (netdev_get_num_tc(dev) == tc)
12795 return 0;
12796
3ffb6a39
MC
12797 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12798 sh = true;
12799
98fdbe73
MC
12800 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12801 sh, tc, bp->tx_nr_rings_xdp);
d1e7925e
MC
12802 if (rc)
12803 return rc;
c0c050c5
MC
12804
12805 /* Needs to close the device and do hw resource re-allocations */
12806 if (netif_running(bp->dev))
12807 bnxt_close_nic(bp, true, false);
12808
12809 if (tc) {
12810 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12811 netdev_set_num_tc(dev, tc);
12812 } else {
12813 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12814 netdev_reset_tc(dev);
12815 }
87e9b377 12816 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
3ffb6a39
MC
12817 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12818 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5
MC
12819
12820 if (netif_running(bp->dev))
12821 return bnxt_open_nic(bp, true, false);
12822
12823 return 0;
12824}
12825
9e0fd15d
JP
12826static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12827 void *cb_priv)
c5e3deb8 12828{
9e0fd15d 12829 struct bnxt *bp = cb_priv;
de4784ca 12830
312324f1
JK
12831 if (!bnxt_tc_flower_enabled(bp) ||
12832 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
38cf0426 12833 return -EOPNOTSUPP;
c5e3deb8 12834
9e0fd15d
JP
12835 switch (type) {
12836 case TC_SETUP_CLSFLOWER:
12837 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12838 default:
12839 return -EOPNOTSUPP;
12840 }
12841}
12842
627c89d0 12843LIST_HEAD(bnxt_block_cb_list);
955bcb6e 12844
2ae7408f
SP
12845static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12846 void *type_data)
12847{
4e95bc26
PNA
12848 struct bnxt *bp = netdev_priv(dev);
12849
2ae7408f 12850 switch (type) {
9e0fd15d 12851 case TC_SETUP_BLOCK:
955bcb6e
PNA
12852 return flow_block_cb_setup_simple(type_data,
12853 &bnxt_block_cb_list,
4e95bc26
PNA
12854 bnxt_setup_tc_block_cb,
12855 bp, bp, true);
575ed7d3 12856 case TC_SETUP_QDISC_MQPRIO: {
2ae7408f
SP
12857 struct tc_mqprio_qopt *mqprio = type_data;
12858
12859 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
56f36acd 12860
2ae7408f
SP
12861 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12862 }
12863 default:
12864 return -EOPNOTSUPP;
12865 }
c5e3deb8
MC
12866}
12867
c0c050c5
MC
12868#ifdef CONFIG_RFS_ACCEL
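/* Return true only if the two ntuple filters match on protocol, IP
 * addresses, ports, control flags, and source/destination MAC addresses.
 */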
12869static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12870 struct bnxt_ntuple_filter *f2)
12871{
12872 struct flow_keys *keys1 = &f1->fkeys;
12873 struct flow_keys *keys2 = &f2->fkeys;
12874
6fc7caa8
MC
12875 if (keys1->basic.n_proto != keys2->basic.n_proto ||
12876 keys1->basic.ip_proto != keys2->basic.ip_proto)
12877 return false;
12878
12879 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12880 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12881 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12882 return false;
12883 } else {
12884 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12885 sizeof(keys1->addrs.v6addrs.src)) ||
12886 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12887 sizeof(keys1->addrs.v6addrs.dst)))
12888 return false;
12889 }
12890
12891 if (keys1->ports.ports == keys2->ports.ports &&
61aad724 12892 keys1->control.flags == keys2->control.flags &&
a54c4d74
MC
12893 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12894 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
c0c050c5
MC
12895 return true;
12896
12897 return false;
12898}
12899
12900static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12901 u16 rxq_index, u32 flow_id)
12902{
12903 struct bnxt *bp = netdev_priv(dev);
12904 struct bnxt_ntuple_filter *fltr, *new_fltr;
12905 struct flow_keys *fkeys;
12906 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
a54c4d74 12907 int rc = 0, idx, bit_id, l2_idx = 0;
c0c050c5 12908 struct hlist_head *head;
f47d0e19 12909 u32 flags;
c0c050c5 12910
a54c4d74
MC
12911 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12912 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12913 int off = 0, j;
12914
12915 netif_addr_lock_bh(dev);
12916 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12917 if (ether_addr_equal(eth->h_dest,
12918 vnic->uc_list + off)) {
12919 l2_idx = j + 1;
12920 break;
12921 }
12922 }
12923 netif_addr_unlock_bh(dev);
12924 if (!l2_idx)
12925 return -EINVAL;
12926 }
c0c050c5
MC
12927 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12928 if (!new_fltr)
12929 return -ENOMEM;
12930
12931 fkeys = &new_fltr->fkeys;
12932 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12933 rc = -EPROTONOSUPPORT;
12934 goto err_free;
12935 }
12936
dda0e746
MC
12937 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12938 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
c0c050c5
MC
12939 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12940 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12941 rc = -EPROTONOSUPPORT;
12942 goto err_free;
12943 }
dda0e746
MC
12944 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12945 bp->hwrm_spec_code < 0x10601) {
12946 rc = -EPROTONOSUPPORT;
12947 goto err_free;
12948 }
f47d0e19
MC
12949 flags = fkeys->control.flags;
12950 if (((flags & FLOW_DIS_ENCAPSULATION) &&
12951 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
61aad724
MC
12952 rc = -EPROTONOSUPPORT;
12953 goto err_free;
12954 }
c0c050c5 12955
a54c4d74 12956 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
c0c050c5
MC
12957 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12958
12959 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12960 head = &bp->ntp_fltr_hash_tbl[idx];
12961 rcu_read_lock();
12962 hlist_for_each_entry_rcu(fltr, head, hash) {
12963 if (bnxt_fltr_match(fltr, new_fltr)) {
02597d39 12964 rc = fltr->sw_id;
c0c050c5 12965 rcu_read_unlock();
c0c050c5
MC
12966 goto err_free;
12967 }
12968 }
12969 rcu_read_unlock();
12970
12971 spin_lock_bh(&bp->ntp_fltr_lock);
84e86b98
MC
12972 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12973 BNXT_NTP_FLTR_MAX_FLTR, 0);
12974 if (bit_id < 0) {
c0c050c5
MC
12975 spin_unlock_bh(&bp->ntp_fltr_lock);
12976 rc = -ENOMEM;
12977 goto err_free;
12978 }
12979
84e86b98 12980 new_fltr->sw_id = (u16)bit_id;
c0c050c5 12981 new_fltr->flow_id = flow_id;
a54c4d74 12982 new_fltr->l2_fltr_idx = l2_idx;
c0c050c5
MC
12983 new_fltr->rxq = rxq_index;
12984 hlist_add_head_rcu(&new_fltr->hash, head);
12985 bp->ntp_fltr_count++;
12986 spin_unlock_bh(&bp->ntp_fltr_lock);
12987
12988 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
c213eae8 12989 bnxt_queue_sp_work(bp);
c0c050c5
MC
12990
12991 return new_fltr->sw_id;
12992
12993err_free:
12994 kfree(new_fltr);
12995 return rc;
12996}
12997
12998static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12999{
13000 int i;
13001
13002 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
13003 struct hlist_head *head;
13004 struct hlist_node *tmp;
13005 struct bnxt_ntuple_filter *fltr;
13006 int rc;
13007
13008 head = &bp->ntp_fltr_hash_tbl[i];
13009 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
13010 bool del = false;
13011
13012 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
13013 if (rps_may_expire_flow(bp->dev, fltr->rxq,
13014 fltr->flow_id,
13015 fltr->sw_id)) {
13016 bnxt_hwrm_cfa_ntuple_filter_free(bp,
13017 fltr);
13018 del = true;
13019 }
13020 } else {
13021 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
13022 fltr);
13023 if (rc)
13024 del = true;
13025 else
13026 set_bit(BNXT_FLTR_VALID, &fltr->state);
13027 }
13028
13029 if (del) {
13030 spin_lock_bh(&bp->ntp_fltr_lock);
13031 hlist_del_rcu(&fltr->hash);
13032 bp->ntp_fltr_count--;
13033 spin_unlock_bh(&bp->ntp_fltr_lock);
13034 synchronize_rcu();
13035 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
13036 kfree(fltr);
13037 }
13038 }
13039 }
19241368 13040 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
9a005c38 13041 netdev_info(bp->dev, "Received PF driver unload event!\n");
c0c050c5
MC
13042}
13043
13044#else
13045
13046static void bnxt_cfg_ntp_filters(struct bnxt *bp)
13047{
13048}
13049
13050#endif /* CONFIG_RFS_ACCEL */
13051
442a35a5 13052static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
c0c050c5 13053{
442a35a5
JK
13054 struct bnxt *bp = netdev_priv(netdev);
13055 struct udp_tunnel_info ti;
13056 unsigned int cmd;
c0c050c5 13057
442a35a5 13058 udp_tunnel_nic_get_port(netdev, table, 0, &ti);
7ae9dc35 13059 if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
442a35a5 13060 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
7ae9dc35 13061 else
442a35a5 13062 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
7cdd5fc3 13063
442a35a5
JK
13064 if (ti.port)
13065 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
ad51b8e9 13066
442a35a5 13067 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
c0c050c5
MC
13068}
13069
442a35a5
JK
13070static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
13071 .sync_table = bnxt_udp_tunnel_sync,
13072 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
13073 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
13074 .tables = {
13075 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
13076 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
13077 },
13078};
c0c050c5 13079
39d8ba2e
MC
13080static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
13081 struct net_device *dev, u32 filter_mask,
13082 int nlflags)
13083{
13084 struct bnxt *bp = netdev_priv(dev);
13085
13086 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
13087 nlflags, filter_mask, NULL);
13088}
13089
13090static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
2fd527b7 13091 u16 flags, struct netlink_ext_ack *extack)
39d8ba2e
MC
13092{
13093 struct bnxt *bp = netdev_priv(dev);
13094 struct nlattr *attr, *br_spec;
13095 int rem, rc = 0;
13096
13097 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
13098 return -EOPNOTSUPP;
13099
13100 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
13101 if (!br_spec)
13102 return -EINVAL;
13103
13104 nla_for_each_nested(attr, br_spec, rem) {
13105 u16 mode;
13106
13107 if (nla_type(attr) != IFLA_BRIDGE_MODE)
13108 continue;
13109
13110 if (nla_len(attr) < sizeof(mode))
13111 return -EINVAL;
13112
13113 mode = nla_get_u16(attr);
13114 if (mode == bp->br_mode)
13115 break;
13116
13117 rc = bnxt_hwrm_set_br_mode(bp, mode);
13118 if (!rc)
13119 bp->br_mode = mode;
13120 break;
13121 }
13122 return rc;
13123}
13124
52d5254a
FF
13125int bnxt_get_port_parent_id(struct net_device *dev,
13126 struct netdev_phys_item_id *ppid)
c124a62f 13127{
52d5254a
FF
13128 struct bnxt *bp = netdev_priv(dev);
13129
c124a62f
SP
13130 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
13131 return -EOPNOTSUPP;
13132
13133 /* The PF and its VF-reps only support the switchdev framework */
d061b241 13134 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
c124a62f
SP
13135 return -EOPNOTSUPP;
13136
b014232f
VV
13137 ppid->id_len = sizeof(bp->dsn);
13138 memcpy(ppid->id, bp->dsn, ppid->id_len);
c124a62f 13139
52d5254a 13140 return 0;
c124a62f
SP
13141}
13142
c0c050c5
MC
13143static const struct net_device_ops bnxt_netdev_ops = {
13144 .ndo_open = bnxt_open,
13145 .ndo_start_xmit = bnxt_start_xmit,
13146 .ndo_stop = bnxt_close,
13147 .ndo_get_stats64 = bnxt_get_stats64,
13148 .ndo_set_rx_mode = bnxt_set_rx_mode,
a7605370 13149 .ndo_eth_ioctl = bnxt_ioctl,
c0c050c5
MC
13150 .ndo_validate_addr = eth_validate_addr,
13151 .ndo_set_mac_address = bnxt_change_mac_addr,
13152 .ndo_change_mtu = bnxt_change_mtu,
13153 .ndo_fix_features = bnxt_fix_features,
13154 .ndo_set_features = bnxt_set_features,
1698d600 13155 .ndo_features_check = bnxt_features_check,
c0c050c5
MC
13156 .ndo_tx_timeout = bnxt_tx_timeout,
13157#ifdef CONFIG_BNXT_SRIOV
13158 .ndo_get_vf_config = bnxt_get_vf_config,
13159 .ndo_set_vf_mac = bnxt_set_vf_mac,
13160 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
13161 .ndo_set_vf_rate = bnxt_set_vf_bw,
13162 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
13163 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
746df139 13164 .ndo_set_vf_trust = bnxt_set_vf_trust,
c0c050c5
MC
13165#endif
13166 .ndo_setup_tc = bnxt_setup_tc,
13167#ifdef CONFIG_RFS_ACCEL
13168 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
13169#endif
f4e63525 13170 .ndo_bpf = bnxt_xdp,
f18c2b77 13171 .ndo_xdp_xmit = bnxt_xdp_xmit,
39d8ba2e
MC
13172 .ndo_bridge_getlink = bnxt_bridge_getlink,
13173 .ndo_bridge_setlink = bnxt_bridge_setlink,
c0c050c5
MC
13174};
13175
13176static void bnxt_remove_one(struct pci_dev *pdev)
13177{
13178 struct net_device *dev = pci_get_drvdata(pdev);
13179 struct bnxt *bp = netdev_priv(dev);
13180
7e334fc8 13181 if (BNXT_PF(bp))
c0c050c5
MC
13182 bnxt_sriov_disable(bp);
13183
a521c8a0 13184 bnxt_ptp_clear(bp);
21d6a11e
VV
13185 pci_disable_pcie_error_reporting(pdev);
13186 unregister_netdev(dev);
b16939b5 13187 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
21d6a11e 13188 /* Flush any pending tasks */
631ce27a
VV
13189 cancel_work_sync(&bp->sp_task);
13190 cancel_delayed_work_sync(&bp->fw_reset_task);
b16939b5
VV
13191 bp->sp_event = 0;
13192
f16a9169 13193 bnxt_dl_fw_reporters_destroy(bp);
cda2cab0 13194 bnxt_dl_unregister(bp);
2ae7408f 13195 bnxt_shutdown_tc(bp);
c0c050c5 13196
7809592d 13197 bnxt_clear_int_mode(bp);
be58a0da 13198 bnxt_hwrm_func_drv_unrgtr(bp);
c0c050c5 13199 bnxt_free_hwrm_resources(bp);
eb513658 13200 bnxt_ethtool_free(bp);
7df4ae9f 13201 bnxt_dcb_free(bp);
a588e458
MC
13202 kfree(bp->edev);
13203 bp->edev = NULL;
ae5c42f0
MC
13204 kfree(bp->ptp_cfg);
13205 bp->ptp_cfg = NULL;
8280b38e
VV
13206 kfree(bp->fw_health);
13207 bp->fw_health = NULL;
c20dc142 13208 bnxt_cleanup_pci(bp);
98f04cf0
MC
13209 bnxt_free_ctx_mem(bp);
13210 kfree(bp->ctx);
13211 bp->ctx = NULL;
1667cbf6
MC
13212 kfree(bp->rss_indir_tbl);
13213 bp->rss_indir_tbl = NULL;
fd3ab1c7 13214 bnxt_free_port_stats(bp);
c0c050c5 13215 free_netdev(dev);
c0c050c5
MC
13216}
13217
ba642ab7 13218static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
c0c050c5
MC
13219{
13220 int rc = 0;
13221 struct bnxt_link_info *link_info = &bp->link_info;
c0c050c5 13222
b0d28207 13223 bp->phy_flags = 0;
170ce013
MC
13224 rc = bnxt_hwrm_phy_qcaps(bp);
13225 if (rc) {
13226 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
13227 rc);
13228 return rc;
13229 }
dade5e15
MC
13230 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
13231 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
13232 else
13233 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
43a5107d
MC
13234 if (!fw_dflt)
13235 return 0;
13236
3c10ed49 13237 mutex_lock(&bp->link_lock);
c0c050c5
MC
13238 rc = bnxt_update_link(bp, false);
13239 if (rc) {
3c10ed49 13240 mutex_unlock(&bp->link_lock);
c0c050c5
MC
13241 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
13242 rc);
13243 return rc;
13244 }
13245
93ed8117
MC
13246 /* Older firmware does not have supported_auto_speeds, so assume
13247 * that all supported speeds can be autonegotiated.
13248 */
13249 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
13250 link_info->support_auto_speeds = link_info->support_speeds;
13251
8119e49b 13252 bnxt_init_ethtool_link_settings(bp);
3c10ed49 13253 mutex_unlock(&bp->link_lock);
ba642ab7 13254 return 0;
c0c050c5
MC
13255}
13256
13257static int bnxt_get_max_irq(struct pci_dev *pdev)
13258{
13259 u16 ctrl;
13260
13261 if (!pdev->msix_cap)
13262 return 1;
13263
13264 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
13265 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
13266}
13267
6e6c5a57
MC
13268static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13269 int *max_cp)
c0c050c5 13270{
6a4f2947 13271 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
e30fbc33 13272 int max_ring_grps = 0, max_irq;
c0c050c5 13273
6a4f2947
MC
13274 *max_tx = hw_resc->max_tx_rings;
13275 *max_rx = hw_resc->max_rx_rings;
e30fbc33
MC
13276 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
13277 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
13278 bnxt_get_ulp_msix_num(bp),
c027c6b4 13279 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
e30fbc33
MC
13280 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
13281 *max_cp = min_t(int, *max_cp, max_irq);
6a4f2947 13282 max_ring_grps = hw_resc->max_hw_ring_grps;
76595193
PS
13283 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
13284 *max_cp -= 1;
13285 *max_rx -= 2;
13286 }
c0c050c5
MC
13287 if (bp->flags & BNXT_FLAG_AGG_RINGS)
13288 *max_rx >>= 1;
e30fbc33
MC
13289 if (bp->flags & BNXT_FLAG_CHIP_P5) {
13290 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
13291 /* On P5 chips, max_cp output param should be available NQs */
13292 *max_cp = max_irq;
13293 }
b72d4a68 13294 *max_rx = min_t(int, *max_rx, max_ring_grps);
6e6c5a57
MC
13295}
13296
13297int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
13298{
13299 int rx, tx, cp;
13300
13301 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
78f058a4
MC
13302 *max_rx = rx;
13303 *max_tx = tx;
6e6c5a57
MC
13304 if (!rx || !tx || !cp)
13305 return -ENOMEM;
13306
6e6c5a57
MC
13307 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
13308}
13309
e4060d30
MC
13310static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13311 bool shared)
13312{
13313 int rc;
13314
13315 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
bdbd1eb5
MC
13316 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
13317 /* Not enough rings, try disabling agg rings. */
13318 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
13319 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
07f4fde5
MC
13320 if (rc) {
13321 /* set BNXT_FLAG_AGG_RINGS back for consistency */
13322 bp->flags |= BNXT_FLAG_AGG_RINGS;
bdbd1eb5 13323 return rc;
07f4fde5 13324 }
bdbd1eb5 13325 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
1054aee8
MC
13326 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13327 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
bdbd1eb5
MC
13328 bnxt_set_ring_params(bp);
13329 }
e4060d30
MC
13330
13331 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
13332 int max_cp, max_stat, max_irq;
13333
13334 /* Reserve minimum resources for RoCE */
13335 max_cp = bnxt_get_max_func_cp_rings(bp);
13336 max_stat = bnxt_get_max_func_stat_ctxs(bp);
13337 max_irq = bnxt_get_max_func_irqs(bp);
13338 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
13339 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
13340 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
13341 return 0;
13342
13343 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
13344 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
13345 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
13346 max_cp = min_t(int, max_cp, max_irq);
13347 max_cp = min_t(int, max_cp, max_stat);
13348 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
13349 if (rc)
13350 rc = 0;
13351 }
13352 return rc;
13353}
13354
58ea801a
MC
13355/* In the initial default shared ring setting, each shared ring must have an
13356 * RX/TX ring pair.
13357 */
13358static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
13359{
13360 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
13361 bp->rx_nr_rings = bp->cp_nr_rings;
13362 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
13363 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13364}
13365
702c221c 13366static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
6e6c5a57
MC
13367{
13368 int dflt_rings, max_rx_rings, max_tx_rings, rc;
6e6c5a57 13369
2773dfb2
MC
13370 if (!bnxt_can_reserve_rings(bp))
13371 return 0;
13372
6e6c5a57
MC
13373 if (sh)
13374 bp->flags |= BNXT_FLAG_SHARED_RINGS;
d629522e 13375 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
1d3ef13d
MC
13376 /* Reduce default rings on multi-port cards so that total default
13377 * rings do not exceed CPU count.
13378 */
13379 if (bp->port_count > 1) {
13380 int max_rings =
13381 max_t(int, num_online_cpus() / bp->port_count, 1);
13382
13383 dflt_rings = min_t(int, dflt_rings, max_rings);
13384 }
e4060d30 13385 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6e6c5a57
MC
13386 if (rc)
13387 return rc;
13388 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
13389 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
58ea801a
MC
13390 if (sh)
13391 bnxt_trim_dflt_sh_rings(bp);
13392 else
13393 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
13394 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
391be5c2 13395
674f50a5 13396 rc = __bnxt_reserve_rings(bp);
662c9b22 13397 if (rc && rc != -ENODEV)
391be5c2 13398 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
58ea801a
MC
13399 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13400 if (sh)
13401 bnxt_trim_dflt_sh_rings(bp);
391be5c2 13402
674f50a5
MC
13403 /* Rings may have been trimmed, re-reserve the trimmed rings. */
13404 if (bnxt_need_reserve_rings(bp)) {
13405 rc = __bnxt_reserve_rings(bp);
662c9b22 13406 if (rc && rc != -ENODEV)
674f50a5
MC
13407 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
13408 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13409 }
76595193
PS
13410 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
13411 bp->rx_nr_rings++;
13412 bp->cp_nr_rings++;
13413 }
5d765a5e
VV
13414 if (rc) {
13415 bp->tx_nr_rings = 0;
13416 bp->rx_nr_rings = 0;
13417 }
6e6c5a57 13418 return rc;
c0c050c5
MC
13419}
13420
47558acd
MC
13421static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
13422{
13423 int rc;
13424
13425 if (bp->tx_nr_rings)
13426 return 0;
13427
6b95c3e9
MC
13428 bnxt_ulp_irq_stop(bp);
13429 bnxt_clear_int_mode(bp);
47558acd
MC
13430 rc = bnxt_set_dflt_rings(bp, true);
13431 if (rc) {
662c9b22
EP
13432 if (BNXT_VF(bp) && rc == -ENODEV)
13433 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
13434 else
13435 netdev_err(bp->dev, "Not enough rings available.\n");
6b95c3e9 13436 goto init_dflt_ring_err;
47558acd
MC
13437 }
13438 rc = bnxt_init_int_mode(bp);
13439 if (rc)
6b95c3e9
MC
13440 goto init_dflt_ring_err;
13441
47558acd 13442 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13ba7943
SK
13443
13444 bnxt_set_dflt_rfs(bp);
13445
6b95c3e9
MC
13446init_dflt_ring_err:
13447 bnxt_ulp_irq_restart(bp, rc);
13448 return rc;
47558acd
MC
13449}
13450
80fcaf46 13451int bnxt_restore_pf_fw_resources(struct bnxt *bp)
7b08f661 13452{
80fcaf46
MC
13453 int rc;
13454
7b08f661
MC
13455 ASSERT_RTNL();
13456 bnxt_hwrm_func_qcaps(bp);
1a037782
VD
13457
13458 if (netif_running(bp->dev))
13459 __bnxt_close_nic(bp, true, false);
13460
ec86f14e 13461 bnxt_ulp_irq_stop(bp);
80fcaf46
MC
13462 bnxt_clear_int_mode(bp);
13463 rc = bnxt_init_int_mode(bp);
ec86f14e 13464 bnxt_ulp_irq_restart(bp, rc);
1a037782
VD
13465
13466 if (netif_running(bp->dev)) {
13467 if (rc)
13468 dev_close(bp->dev);
13469 else
13470 rc = bnxt_open_nic(bp, true, false);
13471 }
13472
80fcaf46 13473 return rc;
7b08f661
MC
13474}
13475
a22a6ac2
MC
13476static int bnxt_init_mac_addr(struct bnxt *bp)
13477{
13478 int rc = 0;
13479
13480 if (BNXT_PF(bp)) {
a96d317f 13481 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
a22a6ac2
MC
13482 } else {
13483#ifdef CONFIG_BNXT_SRIOV
13484 struct bnxt_vf_info *vf = &bp->vf;
28ea334b 13485 bool strict_approval = true;
a22a6ac2
MC
13486
13487 if (is_valid_ether_addr(vf->mac_addr)) {
91cdda40 13488 /* overwrite netdev dev_addr with admin VF MAC */
a96d317f 13489 eth_hw_addr_set(bp->dev, vf->mac_addr);
28ea334b
MC
13490 /* Older PF driver or firmware may not approve this
13491 * correctly.
13492 */
13493 strict_approval = false;
a22a6ac2
MC
13494 } else {
13495 eth_hw_addr_random(bp->dev);
a22a6ac2 13496 }
28ea334b 13497 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
a22a6ac2
MC
13498#endif
13499 }
13500 return rc;
13501}
13502
a0d0fd70
VV
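/* Read the board part number and serial number from the PCI VPD, if present. */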
13503static void bnxt_vpd_read_info(struct bnxt *bp)
13504{
13505 struct pci_dev *pdev = bp->pdev;
0ff25f6a
HK
13506 unsigned int vpd_size, kw_len;
13507 int pos, size;
a0d0fd70
VV
13508 u8 *vpd_data;
13509
550cd7c1
HK
13510 vpd_data = pci_vpd_alloc(pdev, &vpd_size);
13511 if (IS_ERR(vpd_data)) {
13512 pci_warn(pdev, "Unable to read VPD\n");
a0d0fd70 13513 return;
4fd13157
DM
13514 }
13515
0ff25f6a
HK
13516 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13517 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
a0d0fd70
VV
13518 if (pos < 0)
13519 goto read_sn;
13520
0ff25f6a 13521 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
492adcf4 13522 memcpy(bp->board_partno, &vpd_data[pos], size);
a0d0fd70
VV
13523
13524read_sn:
0ff25f6a
HK
13525 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13526 PCI_VPD_RO_KEYWORD_SERIALNO,
13527 &kw_len);
a0d0fd70
VV
13528 if (pos < 0)
13529 goto exit;
13530
0ff25f6a 13531 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
492adcf4 13532 memcpy(bp->board_serialno, &vpd_data[pos], size);
a0d0fd70
VV
13533exit:
13534 kfree(vpd_data);
13535}
13536
03213a99
JP
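/* Read the PCIe Device Serial Number and mark the DSN as valid for use as
 * the eswitch switch ID.
 */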
13537static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13538{
13539 struct pci_dev *pdev = bp->pdev;
8d85b75b 13540 u64 qword;
03213a99 13541
8d85b75b
JK
13542 qword = pci_get_dsn(pdev);
13543 if (!qword) {
13544 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
03213a99
JP
13545 return -EOPNOTSUPP;
13546 }
13547
8d85b75b
JK
13548 put_unaligned_le64(qword, dsn);
13549
d061b241 13550 bp->flags |= BNXT_FLAG_DSN_VALID;
03213a99
JP
13551 return 0;
13552}
13553
8ae24738
MC
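/* Map the doorbell BAR (BAR 2) using the doorbell size previously
 * discovered from firmware.
 */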
13554static int bnxt_map_db_bar(struct bnxt *bp)
13555{
13556 if (!bp->db_size)
13557 return -ENODEV;
13558 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13559 if (!bp->bar1)
13560 return -ENOMEM;
13561 return 0;
13562}
13563
c7dd4a5b
EP
13564void bnxt_print_device_info(struct bnxt *bp)
13565{
13566 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
13567 board_info[bp->board_idx].name,
13568 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
13569
13570 pcie_print_link_status(bp->pdev);
13571}
13572
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	/* Clear any pending DMA transactions from crash kernel
	 * while loading driver in capture kernel.
	 */
	if (is_kdump_kernel()) {
		pci_clear_master(pdev);
		pcie_flr(pdev);
	}

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);
	bp->board_idx = ent->driver_data;
	bp->msg_enable = BNXT_DEF_MSG_ENABLE;
	bnxt_set_max_func_irqs(bp, max_irqs);

	if (bnxt_vf_pciid(bp->board_idx))
		bp->flags |= BNXT_FLAG_VF;

	/* No devlink port registration in case of a VF */
	if (BNXT_PF(bp))
		SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	mutex_init(&bp->link_lock);

	rc = bnxt_fw_init_one_p1(bp);
	if (rc)
		goto init_err_pci_clean;

	if (BNXT_PF(bp))
		bnxt_vpd_read_info(bp);

	if (BNXT_CHIP_P5(bp)) {
		bp->flags |= BNXT_FLAG_CHIP_P5;
		if (BNXT_CHIP_SR2(bp))
			bp->flags |= BNXT_FLAG_CHIP_SR2;
	}

	rc = bnxt_alloc_rss_indir_tbl(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_fw_init_one_p2(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_map_db_bar(bp);
	if (rc) {
		dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
			rc);
		goto init_err_pci_clean;
	}

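	/* The checksum, TSO, tunnel GSO and RX hash offloads advertised
	 * below are refined further down based on TPA support and the
	 * VLAN offload capabilities reported by firmware.
	 */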
	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;

	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_GRO_HW;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	if (dev->features & NETIF_F_GRO_HW)
		dev->features &= ~NETIF_F_LRO;
	dev->priv_flags |= IFF_UNICAST_FLT;

	netif_set_tso_max_size(dev, GSO_MAX_SIZE);

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
	if (BNXT_SUPPORTS_TPA(bp)) {
		bp->gro_func = bnxt_gro_func_5730x;
		if (BNXT_CHIP_P4(bp))
			bp->gro_func = bnxt_gro_func_5731x;
		else if (BNXT_CHIP_P5(bp))
			bp->gro_func = bnxt_gro_func_5750x;
	}
	if (!BNXT_CHIP_P4_PLUS(bp))
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}

	if (BNXT_PF(bp)) {
		/* Read the adapter's DSN to use as the eswitch switch_id */
		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
	}

	/* MTU range: 60 - FW defined max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = bp->max_mtu;

	rc = bnxt_probe_phy(bp, true);
	if (rc)
		goto init_err_pci_clean;

	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		if (BNXT_VF(bp) && rc == -ENODEV) {
			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
		} else {
			netdev_err(bp->dev, "Not enough rings available.\n");
			rc = -ENOMEM;
		}
		goto init_err_pci_clean;
	}

	bnxt_fw_init_one_p3(bp);

	bnxt_init_dflt_coal(bp);

	if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	/* No TC has been set yet and rings may have been trimmed due to
	 * limited MSIX, so we re-initialize the TX rings per TC.
	 */
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

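	/* bnxt_pf_wq is a module-global workqueue shared by all PFs handled
	 * by this driver; it is created only once, when the first PF is
	 * probed, and destroyed in bnxt_exit().
	 */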
	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
				rc = -ENOMEM;
				goto init_err_pci_clean;
			}
		}
		rc = bnxt_init_tc(bp);
		if (rc)
			netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
				   rc);
	}

	bnxt_inv_fw_health_reg(bp);
	rc = bnxt_dl_register(bp);
	if (rc)
		goto init_err_dl;

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup;

	bnxt_dl_fw_reporters_create(bp);

	bnxt_print_device_info(bp);

	pci_save_state(pdev);
	return 0;

init_err_cleanup:
	bnxt_dl_unregister(bp);
init_err_dl:
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_ethtool_free(bp);
	bnxt_ptp_clear(bp);
	kfree(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	kfree(bp->rss_indir_tbl);
	bp->rss_indir_tbl = NULL;

init_err_free:
	free_netdev(dev);
	return rc;
}

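/* .shutdown handler: close the interface, quiesce any upper-layer (ULP)
 * driver and disable the PCI device.  On a power-off transition it also
 * arms Wake-on-LAN (if configured) and places the device in D3hot.
 */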
static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		dev_close(dev);

	bnxt_ulp_shutdown(bp);
	bnxt_clear_int_mode(bp);
	pci_disable_device(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}

#ifdef CONFIG_PM_SLEEP
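/* System suspend: stop the ULP driver, detach and close the netdev,
 * unregister the driver from firmware and release context memory so
 * that resume can renegotiate everything from a clean state.
 */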
static int bnxt_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	bnxt_ulp_stop(bp);
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	pci_disable_device(bp->pdev);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	rtnl_unlock();
	return rc;
}

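/* System resume: re-enable the PCI device, redo the firmware handshake
 * (version query, function reset, capability refresh and driver
 * registration) and reopen the interface if it was running at suspend
 * time.
 */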
static int bnxt_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	rc = pci_enable_device(bp->pdev);
	if (rc) {
		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
			   rc);
		goto resume_exit;
	}
	pci_set_master(bp->pdev);
	if (bnxt_hwrm_ver_get(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		goto resume_exit;

	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	bnxt_ulp_start(bp, rc);
	if (!rc)
		bnxt_reenable_sriov(bp);
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (state == pci_channel_io_frozen)
		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);

	if (netif_running(netdev))
		bnxt_close(netdev);

	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int retry = 0;
	int err = 0;
	int off;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);
		/* Upon fatal error, the device's internal logic that latches
		 * the BAR values is reset and is restored only by rewriting
		 * the BARs.
		 *
		 * As pci_restore_state() does not re-write a BAR whose value
		 * matches the one saved earlier, the driver writes the BARs
		 * to 0 to force a restore after a fatal error.
		 */
		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
				       &bp->state)) {
			for (off = PCI_BASE_ADDRESS_0;
			     off <= PCI_BASE_ADDRESS_5; off += 4)
				pci_write_config_dword(bp->pdev, off, 0);
		}
		pci_restore_state(pdev);
		pci_save_state(pdev);

		bnxt_inv_fw_health_reg(bp);
		bnxt_try_map_fw_health_reg(bp);

		/* In some PCIe AER scenarios, firmware may take up to
		 * 10 seconds to become ready in the worst case.
		 */
		do {
			err = bnxt_try_recover_fw(bp);
			if (!err)
				break;
			retry++;
		} while (retry < BNXT_FW_SLOT_RESET_RETRY);

		if (err) {
			dev_err(&pdev->dev, "Firmware not ready\n");
			goto reset_exit;
		}

		err = bnxt_hwrm_func_reset(bp);
		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;

		bnxt_ulp_irq_stop(bp);
		bnxt_clear_int_mode(bp);
		err = bnxt_init_int_mode(bp);
		bnxt_ulp_irq_restart(bp, err);
	}

reset_exit:
	bnxt_clear_reservations(bp, true);
	rtnl_unlock();

	return result;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err;

	netdev_info(bp->dev, "PCI Slot Resume\n");
	rtnl_lock();

	err = bnxt_hwrm_func_qcaps(bp);
	if (!err && netif_running(netdev))
		err = bnxt_open(netdev);

	bnxt_ulp_start(bp, err);
	if (!err) {
		bnxt_reenable_sriov(bp);
		netif_device_attach(netdev);
	}

	rtnl_unlock();
}

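/* AER hooks: error_detected() detaches and closes the device,
 * slot_reset() restores PCI state and re-establishes firmware after the
 * bus reset, and resume() reopens the interface once recovery completes.
 */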
static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

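/* Module init/exit: the debugfs root is created before the PCI driver
 * is registered and is torn down last, mirroring the registration order.
 */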
static int __init bnxt_init(void)
{
	int err;

	bnxt_debug_init();
	err = pci_register_driver(&bnxt_pci_driver);
	if (err) {
		bnxt_debug_exit();
		return err;
	}

	return 0;
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);