Commit | Line | Data |
---|---|---|
837f08fd AV |
1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* Copyright (c) 2018, Intel Corporation. */ | |
3 | ||
4 | #ifndef _ICE_H_ | |
5 | #define _ICE_H_ | |
6 | ||
7 | #include <linux/types.h> | |
8 | #include <linux/errno.h> | |
9 | #include <linux/kernel.h> | |
10 | #include <linux/module.h> | |
462acf6a | 11 | #include <linux/firmware.h> |
837f08fd AV |
12 | #include <linux/netdevice.h> |
13 | #include <linux/compiler.h> | |
dc49c772 | 14 | #include <linux/etherdevice.h> |
cdedef59 | 15 | #include <linux/skbuff.h> |
3a858ba3 | 16 | #include <linux/cpumask.h> |
fcea6f3d | 17 | #include <linux/rtnetlink.h> |
3a858ba3 | 18 | #include <linux/if_vlan.h> |
cdedef59 | 19 | #include <linux/dma-mapping.h> |
837f08fd | 20 | #include <linux/pci.h> |
940b61af | 21 | #include <linux/workqueue.h> |
d69ea414 | 22 | #include <linux/wait.h> |
837f08fd | 23 | #include <linux/aer.h> |
940b61af | 24 | #include <linux/interrupt.h> |
fcea6f3d | 25 | #include <linux/ethtool.h> |
940b61af | 26 | #include <linux/timer.h> |
7ec59eea | 27 | #include <linux/delay.h> |
837f08fd | 28 | #include <linux/bitmap.h> |
3a858ba3 | 29 | #include <linux/log2.h> |
d76a60ba | 30 | #include <linux/ip.h> |
cf909e19 | 31 | #include <linux/sctp.h> |
d76a60ba | 32 | #include <linux/ipv6.h> |
efc2214b | 33 | #include <linux/pkt_sched.h> |
940b61af | 34 | #include <linux/if_bridge.h> |
e3710a01 | 35 | #include <linux/ctype.h> |
efc2214b | 36 | #include <linux/bpf.h> |
195bb48f | 37 | #include <linux/btf.h> |
f9f5301e | 38 | #include <linux/auxiliary_bus.h> |
ddf30f7f | 39 | #include <linux/avf/virtchnl.h> |
28bf2672 | 40 | #include <linux/cpu_rmap.h> |
cdf1f1f1 | 41 | #include <linux/dim.h> |
0754d65b | 42 | #include <net/pkt_cls.h> |
9fea7498 KP |
43 | #include <net/tc_act/tc_mirred.h> |
44 | #include <net/tc_act/tc_gact.h> | |
45 | #include <net/ip.h> | |
1adf7ead | 46 | #include <net/devlink.h> |
d76a60ba | 47 | #include <net/ipv6.h> |
2d4238f5 | 48 | #include <net/xdp_sock.h> |
c7a21904 | 49 | #include <net/xdp_sock_drv.h> |
a4e82a81 TN |
50 | #include <net/geneve.h> |
51 | #include <net/gre.h> | |
52 | #include <net/udp_tunnel.h> | |
53 | #include <net/vxlan.h> | |
9a225f81 | 54 | #include <net/gtp.h> |
837f08fd AV |
55 | #include "ice_devids.h" |
56 | #include "ice_type.h" | |
940b61af | 57 | #include "ice_txrx.h" |
37b6f646 | 58 | #include "ice_dcb.h" |
9c20346b | 59 | #include "ice_switch.h" |
f31e4b6f | 60 | #include "ice_common.h" |
fbc7b27a | 61 | #include "ice_flow.h" |
9c20346b | 62 | #include "ice_sched.h" |
348048e7 | 63 | #include "ice_idc_int.h" |
0deb0bf7 | 64 | #include "ice_sriov.h" |
d775155a | 65 | #include "ice_vf_mbx.h" |
06c16d89 | 66 | #include "ice_ptp.h" |
148beb61 | 67 | #include "ice_fdir.h" |
2d4238f5 | 68 | #include "ice_xsk.h" |
28bf2672 | 69 | #include "ice_arfs.h" |
37165e3f | 70 | #include "ice_repr.h" |
0d08a441 | 71 | #include "ice_eswitch.h" |
df006dd4 | 72 | #include "ice_lag.h" |
bc42afa9 | 73 | #include "ice_vsi_vlan_ops.h" |
43113ff7 | 74 | #include "ice_gnss.h" |
837f08fd AV |
75 | |
/* PCI BAR mapped for register access */
#define ICE_BAR0 0
/* descriptor ring sizing: allocation granularity, bounds and defaults */
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC 64
#define ICE_MAX_NUM_DESC 8160
#define ICE_DFLT_MIN_RX_DESC 512
#define ICE_DFLT_NUM_TX_DESC 256
#define ICE_DFLT_NUM_RX_DESC 2048

#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
/* IFNAMSIZ plus room for an interrupt-name suffix */
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
/* control queue depths: admin, mailbox and sideband queues */
#define ICE_AQ_LEN 192
#define ICE_MBXSQ_LEN 64
#define ICE_SBQ_LEN 64
/* minimum MSI-X budget: one LAN Tx/Rx vector plus one OICR vector */
#define ICE_MIN_LAN_TXRX_MSIX 1
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 2
#define ICE_RDMA_NUM_AEQ_MSIX 4
#define ICE_MIN_RDMA_MSIX 2
#define ICE_ESWITCH_MSIX 1
/* sentinel meaning "no VSI" (e.g. pf->ctrl_vsi_idx when unset) */
#define ICE_NO_VSI 0xffff
/* queue-to-VSI mapping modes and scatter-mode queue limits */
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
#define ICE_MAX_SCATTER_TXQS 16
#define ICE_MAX_SCATTER_RXQS 16
#define ICE_Q_WAIT_RETRY_LIMIT 10
#define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT)
#define ICE_MAX_LG_RSS_QS 256
/* software IRQ-tracker IDs; special vectors count down from the valid bit */
#define ICE_RES_VALID_BIT 0x8000
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_RES_RDMA_VEC_ID (ICE_RES_MISC_VEC_ID - 1)
/* All VF control VSIs share the same IRQ, so assign a unique ID for them */
#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_RDMA_VEC_ID - 1)
#define ICE_INVAL_Q_INDEX 0xffff

#define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */

/* ADQ channel TCs start at 1 (see ice_for_each_chnl_tc) */
#define ICE_CHNL_START_TC 1

#define ICE_MAX_RESET_WAIT 20

#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)

/* default netif message-level mask */
#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)

/* place @val in user-priority slot @i of a VSI UP translation table */
#define ICE_UP_TABLE_TRANSLATE(val, i) \
	(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
	 ICE_AQ_VSI_UP_TABLE_UP##i##_M)

/* typed accessors for descriptor @i of ring @R */
#define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))
#define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))

/* Minimum BW limit is 500 Kbps for any scheduler node */
#define ICE_MIN_BW_LIMIT 500
/* User can specify BW in either Kbit/Mbit/Gbit and OS converts it in bytes.
 * use it to convert user specified BW limit into Kbps
 */
#define ICE_BW_KBPS_DIVISOR 125
138 | ||
0b28b702 AV |
/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
	for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)

/* Macros for each Tx/Xdp/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_txq; (i)++)

#define ice_for_each_xdp_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_xdp_txq; (i)++)

#define ice_for_each_rxq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)

/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
#define ice_for_each_alloc_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)

#define ice_for_each_alloc_rxq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)

/* Macro for each interrupt vector assigned to a VSI */
#define ice_for_each_q_vector(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)

/* Macro for each ADQ channel TC, beginning at ICE_CHNL_START_TC */
#define ice_for_each_chnl_tc(i)	\
	for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)

/* promiscuous-mode bit combinations passed to switch filter helpers */
#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_UCAST_RX)

#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
				     ICE_PROMISC_UCAST_RX | \
				     ICE_PROMISC_VLAN_TX | \
				     ICE_PROMISC_VLAN_RX)

#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)

#define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \
				     ICE_PROMISC_MCAST_RX | \
				     ICE_PROMISC_VLAN_TX | \
				     ICE_PROMISC_VLAN_RX)

/* get the struct device backing a PF's PCI device */
#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
181 | ||
40b24760 AV |
/* optional device features tracked in pf->features bitmap */
enum ice_feature {
	ICE_F_DSCP,
	ICE_F_SMA_CTRL,
	ICE_F_GNSS,
	ICE_F_MAX	/* must be last: sizes the features bitmap */
};
188 | ||
22bf877e MF |
189 | DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key); |
190 | ||
0754d65b KP |
/* one ADQ channel: a slice of queues/BW carved out of a VSI via tc-mqprio */
struct ice_channel {
	struct list_head list;		/* entry in vsi->ch_list */
	u8 type;
	u16 sw_id;
	u16 base_q;			/* first queue of this channel */
	u16 num_rxq;
	u16 num_txq;
	u16 vsi_num;
	u8 ena_tc;
	struct ice_aqc_vsi_props info;
	u64 max_tx_rate;		/* channel Tx rate limits */
	u64 min_tx_rate;
	atomic_t num_sb_fltr;		/* sideband filters on this channel */
	struct ice_vsi *ch_vsi;		/* VSI backing this channel */
};
206 | ||
eff380aa AV |
/* identifying information for one Tx queue, passed to queue teardown/cfg */
struct ice_txq_meta {
	u32 q_teid;	/* Tx-scheduler element identifier */
	u16 q_id;	/* Entry in VSI's txq_map bitmap */
	u16 q_handle;	/* Relative index of Tx queue within TC */
	u16 vsi_idx;	/* VSI index that Tx queue belongs to */
	u8 tc;		/* TC number that Tx queue belongs to */
};
214 | ||
3a858ba3 AV |
/* per-TC queue layout */
struct ice_tc_info {
	u16 qoffset;	/* first queue of this TC */
	u16 qcount_tx;
	u16 qcount_rx;
	u8 netdev_tc;	/* TC index reported to the stack */
};

/* traffic-class configuration of a VSI */
struct ice_tc_cfg {
	u8 numtc;			/* Total number of enabled TCs */
	u16 ena_tc;			/* Tx map */
	struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};
227 | ||
940b61af AV |
/* simple resource (e.g. IRQ vector) allocator state */
struct ice_res_tracker {
	u16 num_entries;
	u16 end;
	u16 list[];	/* flexible array of tracked entries */
};
233 | ||
03f7a986 | 234 | struct ice_qs_cfg { |
94c4441b | 235 | struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */ |
03f7a986 AV |
236 | unsigned long *pf_map; |
237 | unsigned long pf_map_size; | |
238 | unsigned int q_count; | |
239 | unsigned int scatter_count; | |
240 | u16 *vsi_map; | |
241 | u16 vsi_map_offset; | |
242 | u8 mapping_mode; | |
243 | }; | |
244 | ||
940b61af AV |
/* software representation of one device switch */
struct ice_sw {
	struct ice_pf *pf;
	u16 sw_id;		/* switch ID for this switch */
	u16 bridge_mode;	/* VEB/VEPA/Port Virtualizer */
	struct ice_vsi *dflt_vsi;	/* default VSI for this switch */
	u8 dflt_vsi_ena:1;	/* true if above dflt_vsi is enabled */
};
252 | ||
e97fb1ae | 253 | enum ice_pf_state { |
7e408e07 AV |
254 | ICE_TESTING, |
255 | ICE_DOWN, | |
256 | ICE_NEEDS_RESTART, | |
257 | ICE_PREPARED_FOR_RESET, /* set by driver when prepared */ | |
258 | ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */ | |
348048e7 DE |
259 | ICE_PFR_REQ, /* set by driver */ |
260 | ICE_CORER_REQ, /* set by driver */ | |
261 | ICE_GLOBR_REQ, /* set by driver */ | |
7e408e07 AV |
262 | ICE_CORER_RECV, /* set by OICR handler */ |
263 | ICE_GLOBR_RECV, /* set by OICR handler */ | |
264 | ICE_EMPR_RECV, /* set by OICR handler */ | |
265 | ICE_SUSPENDED, /* set on module remove path */ | |
266 | ICE_RESET_FAILED, /* set by reset/rebuild */ | |
ddf30f7f AV |
267 | /* When checking for the PF to be in a nominal operating state, the |
268 | * bits that are grouped at the beginning of the list need to be | |
7e408e07 | 269 | * checked. Bits occurring before ICE_STATE_NOMINAL_CHECK_BITS will |
df17b7e0 | 270 | * be checked. If you need to add a bit into consideration for nominal |
ddf30f7f | 271 | * operating state, it must be added before |
7e408e07 | 272 | * ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position |
ddf30f7f AV |
273 | * without appropriate consideration. |
274 | */ | |
7e408e07 AV |
275 | ICE_STATE_NOMINAL_CHECK_BITS, |
276 | ICE_ADMINQ_EVENT_PENDING, | |
277 | ICE_MAILBOXQ_EVENT_PENDING, | |
8f5ee3c4 | 278 | ICE_SIDEBANDQ_EVENT_PENDING, |
7e408e07 AV |
279 | ICE_MDD_EVENT_PENDING, |
280 | ICE_VFLR_EVENT_PENDING, | |
281 | ICE_FLTR_OVERFLOW_PROMISC, | |
282 | ICE_VF_DIS, | |
283 | ICE_CFG_BUSY, | |
284 | ICE_SERVICE_SCHED, | |
285 | ICE_SERVICE_DIS, | |
286 | ICE_FD_FLUSH_REQ, | |
287 | ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */ | |
288 | ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */ | |
289 | ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */ | |
290 | ICE_LINK_DEFAULT_OVERRIDE_PENDING, | |
291 | ICE_PHY_INIT_COMPLETE, | |
292 | ICE_FD_VF_FLUSH_CTX, /* set at FD Rx IRQ or timeout */ | |
32d53c0a | 293 | ICE_AUX_ERR_PENDING, |
7e408e07 | 294 | ICE_STATE_NBITS /* must be last */ |
837f08fd AV |
295 | }; |
296 | ||
e97fb1ae AV |
/* bit positions used in the vsi->state bitmap */
enum ice_vsi_state {
	ICE_VSI_DOWN,
	ICE_VSI_NEEDS_RESTART,
	ICE_VSI_NETDEV_ALLOCD,
	ICE_VSI_NETDEV_REGISTERED,
	ICE_VSI_UMAC_FLTR_CHANGED,	/* unicast MAC filters need sync */
	ICE_VSI_MMAC_FLTR_CHANGED,	/* multicast MAC filters need sync */
	ICE_VSI_PROMISC_CHANGED,
	ICE_VSI_STATE_NBITS		/* must be last */
};
307 | ||
940b61af AV |
/* struct that defines a VSI, associated with a dev */
struct ice_vsi {
	struct net_device *netdev;
	struct ice_sw *vsw;		 /* switch this VSI is on */
	struct ice_pf *back;		 /* back pointer to PF */
	struct ice_port_info *port_info; /* back pointer to port_info */
	struct ice_rx_ring **rx_rings;	 /* Rx ring array */
	struct ice_tx_ring **tx_rings;	 /* Tx ring array */
	struct ice_q_vector **q_vectors; /* q_vector array */

	irqreturn_t (*irq_handler)(int irq, void *data);

	u64 tx_linearize;
	DECLARE_BITMAP(state, ICE_VSI_STATE_NBITS);
	unsigned int current_netdev_flags;
	/* Tx/Rx soft counters */
	u32 tx_restart;
	u32 tx_busy;
	u32 rx_buf_failed;
	u32 rx_page_failed;
	u16 num_q_vectors;
	u16 base_vector;		/* IRQ base for OS reserved vectors */
	enum ice_vsi_type type;
	u16 vsi_num;			/* HW (absolute) index of this VSI */
	u16 idx;			/* software index in pf->vsi[] */

	struct ice_vf *vf;		/* VF associated with this VSI */

	u16 ethtype;			/* Ethernet protocol for pause frame */
	u16 num_gfltr;			/* Flow Director guaranteed filters */
	u16 num_bfltr;			/* Flow Director best-effort filters */

	/* RSS config */
	u16 rss_table_size;	/* HW RSS table size */
	u16 rss_size;		/* Allocated RSS queues */
	u8 *rss_hkey_user;	/* User configured hash keys */
	u8 *rss_lut_user;	/* User configured lookup table entries */
	u8 rss_lut_type;	/* used to configure Get/Set RSS LUT AQ call */

	/* aRFS members only allocated for the PF VSI */
#define ICE_MAX_ARFS_LIST	1024
#define ICE_ARFS_LST_MASK	(ICE_MAX_ARFS_LIST - 1)
	struct hlist_head *arfs_fltr_list;
	struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
	spinlock_t arfs_lock;	/* protects aRFS hash table and filter state */
	atomic_t *arfs_last_fltr_id;

	u16 max_frame;
	u16 rx_buf_len;

	struct ice_aqc_vsi_props info;	/* VSI properties */

	/* VSI stats */
	struct rtnl_link_stats64 net_stats;
	struct ice_eth_stats eth_stats;
	struct ice_eth_stats eth_stats_prev;

	struct list_head tmp_sync_list;		/* MAC filters to be synced */
	struct list_head tmp_unsync_list;	/* MAC filters to be unsynced */

	u8 irqs_ready:1;
	u8 current_isup:1;		 /* Sync 'link up' logging */
	u8 stat_offsets_loaded:1;
	struct ice_vsi_vlan_ops inner_vlan_ops;
	struct ice_vsi_vlan_ops outer_vlan_ops;
	u16 num_vlan;

	/* queue information */
	u8 tx_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
	u8 rx_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
	u16 *txq_map;			 /* index in pf->avail_txqs */
	u16 *rxq_map;			 /* index in pf->avail_rxqs */
	u16 alloc_txq;			 /* Allocated Tx queues */
	u16 num_txq;			 /* Used Tx queues */
	u16 alloc_rxq;			 /* Allocated Rx queues */
	u16 num_rxq;			 /* Used Rx queues */
	u16 req_txq;			 /* User requested Tx queues */
	u16 req_rxq;			 /* User requested Rx queues */
	u16 num_rx_desc;
	u16 num_tx_desc;
	u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
	struct ice_tc_cfg tc_cfg;
	struct bpf_prog *xdp_prog;
	struct ice_tx_ring **xdp_rings;	 /* XDP ring array */
	unsigned long *af_xdp_zc_qps;	 /* tracks AF_XDP ZC enabled qps */
	u16 num_xdp_txq;		 /* Used XDP queues */
	u8 xdp_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */

	struct net_device **target_netdevs;

	struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */

	/* Channel Specific Fields */
	struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC];
	u16 cnt_q_avail;
	u16 next_base_q;	/* next queue to be used for channel setup */
	struct list_head ch_list;
	u16 num_chnl_rxq;
	u16 num_chnl_txq;
	u16 ch_rss_size;
	u16 num_chnl_fltr;
	/* store away rss size info before configuring ADQ channels so that,
	 * it can be used after tc-qdisc delete, to get back RSS setting as
	 * they were before
	 */
	u16 orig_rss_size;
	/* this keeps tracks of all enabled TC with and without DCB
	 * and inclusive of ADQ, vsi->mqprio_opt keeps track of queue
	 * information
	 */
	u8 all_numtc;
	u16 all_enatc;

	/* store away TC info, to be used for rebuild logic */
	u8 old_numtc;
	u16 old_ena_tc;

	struct ice_channel *ch;	/* set iff this VSI backs an ADQ channel */

	/* setup back reference, to which aggregator node this VSI
	 * corresponds to
	 */
	struct ice_agg_node *agg_node;
} ____cacheline_internodealigned_in_smp;
431 | ||
/* struct that defines an interrupt vector */
struct ice_q_vector {
	struct ice_vsi *vsi;

	u16 v_idx;			/* index in the vsi->q_vector array. */
	u16 reg_idx;			/* register index used with the HW */
	u8 num_ring_rx;			/* total number of Rx rings in vector */
	u8 num_ring_tx;			/* total number of Tx rings in vector */
	u8 wb_on_itr:1;			/* if true, WB on ITR is enabled */
	/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
	 * value to the device
	 */
	u8 intrl;

	struct napi_struct napi;

	/* per-direction ring containers with interrupt-moderation state */
	struct ice_ring_container rx;
	struct ice_ring_container tx;

	cpumask_t affinity_mask;
	struct irq_affinity_notify affinity_notify;

	/* non-NULL when this vector serves an ADQ channel */
	struct ice_channel *ch;

	char name[ICE_INT_NAME_STR_LEN];

	u16 total_events;	/* net_dim(): number of interrupts processed */
} ____cacheline_internodealigned_in_smp;
460 | ||
/* bit positions used in the pf->flags bitmap */
enum ice_pf_flags {
	ICE_FLAG_FLTR_SYNC,
	ICE_FLAG_RDMA_ENA,
	ICE_FLAG_RSS_ENA,
	ICE_FLAG_SRIOV_ENA,
	ICE_FLAG_SRIOV_CAPABLE,
	ICE_FLAG_DCB_CAPABLE,
	ICE_FLAG_DCB_ENA,
	ICE_FLAG_FD_ENA,
	ICE_FLAG_PTP_SUPPORTED,		/* PTP is supported by NVM */
	ICE_FLAG_PTP,			/* PTP is enabled by software */
	ICE_FLAG_ADV_FEATURES,
	ICE_FLAG_TC_MQPRIO,		/* support for Multi queue TC */
	ICE_FLAG_CLS_FLOWER,
	ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
	ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
	ICE_FLAG_NO_MEDIA,
	ICE_FLAG_FW_LLDP_AGENT,
	ICE_FLAG_MOD_POWER_UNSUPPORTED,
	ICE_FLAG_PHY_FW_LOAD_FAILED,
	ICE_FLAG_ETHTOOL_CTXT,		/* set when ethtool holds RTNL lock */
	ICE_FLAG_LEGACY_RX,
	ICE_FLAG_VF_TRUE_PROMISC_ENA,
	ICE_FLAG_MDD_AUTO_RESET_VF,
	ICE_FLAG_VF_VLAN_PRUNING,
	ICE_FLAG_LINK_LENIENT_MODE_ENA,
	ICE_FLAG_PLUG_AUX_DEV,
	ICE_FLAG_MTU_CHANGED,
	ICE_FLAG_GNSS,			/* GNSS successfully initialized */
	ICE_PF_FLAGS_NBITS		/* must be last */
};
492 | ||
1a1c40df GN |
/* eswitch switchdev-mode bookkeeping */
struct ice_switchdev_info {
	struct ice_vsi *control_vsi;
	struct ice_vsi *uplink_vsi;
	bool is_running;	/* true once switchdev mode is configured */
};
498 | ||
b126bd6b KP |
/* scheduler aggregator node a VSI can be attached to */
struct ice_agg_node {
	u32 agg_id;
#define ICE_MAX_VSIS_IN_AGG_NODE	64
	u32 num_vsis;
	u8 valid;
};
505 | ||
837f08fd AV |
/* per-physical-function driver state */
struct ice_pf {
	struct pci_dev *pdev;

	/* devlink regions for NVM/SRAM/device-capabilities snapshots */
	struct devlink_region *nvm_region;
	struct devlink_region *sram_region;
	struct devlink_region *devcaps_region;

	/* devlink port data */
	struct devlink_port devlink_port;

	/* OS reserved IRQ details */
	struct msix_entry *msix_entries;
	struct ice_res_tracker *irq_tracker;
	/* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
	 * number of MSIX vectors needed for all SR-IOV VFs from the number of
	 * MSIX vectors allowed on this PF.
	 */
	u16 sriov_base_vector;

	u16 ctrl_vsi_idx;	/* control VSI index in pf->vsi array */

	struct ice_vsi **vsi;		/* VSIs created by the driver */
	struct ice_sw *first_sw;	/* first switch created by firmware */
	u16 eswitch_mode;		/* current mode of eswitch */
	struct ice_vfs vfs;
	DECLARE_BITMAP(features, ICE_F_MAX);
	DECLARE_BITMAP(state, ICE_STATE_NBITS);
	DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
	unsigned long *avail_txqs;	/* bitmap to track PF Tx queue usage */
	unsigned long *avail_rxqs;	/* bitmap to track PF Rx queue usage */
	/* service task timing */
	unsigned long serv_tmr_period;
	unsigned long serv_tmr_prev;
	struct timer_list serv_tmr;
	struct work_struct serv_task;
	struct mutex avail_q_mutex;	/* protects access to avail_[rx|tx]qs */
	struct mutex sw_mutex;		/* lock for protecting VSI alloc flow */
	struct mutex tc_mutex;		/* lock to protect TC changes */
	struct mutex adev_mutex;	/* lock to protect aux device access */
	u32 msg_enable;
	struct ice_ptp ptp;
	/* GNSS module state */
	struct tty_driver *ice_gnss_tty_driver;
	struct tty_port gnss_tty_port;
	struct gnss_serial *gnss_serial;
	u16 num_rdma_msix;	/* Total MSIX vectors for RDMA driver */
	u16 rdma_base_vector;

	/* spinlock to protect the AdminQ wait list */
	spinlock_t aq_wait_lock;
	struct hlist_head aq_wait_list;
	wait_queue_head_t aq_wait_queue;
	bool fw_emp_reset_disabled;

	wait_queue_head_t reset_wait_queue;

	u32 hw_csum_rx_error;
	u32 oicr_err_reg;
	u16 oicr_idx;		/* Other interrupt cause MSIX vector index */
	u16 num_avail_sw_msix;	/* remaining MSIX SW vectors left unclaimed */
	u16 max_pf_txqs;	/* Total Tx queues PF wide */
	u16 max_pf_rxqs;	/* Total Rx queues PF wide */
	u16 num_lan_msix;	/* Total MSIX vectors for base driver */
	u16 num_lan_tx;		/* num LAN Tx queues setup */
	u16 num_lan_rx;		/* num LAN Rx queues setup */
	u16 next_vsi;		/* Next free slot in pf->vsi[] - 0-based! */
	u16 num_alloc_vsi;
	u16 corer_count;	/* Core reset count */
	u16 globr_count;	/* Global reset count */
	u16 empr_count;		/* EMP reset count */
	u16 pfr_count;		/* PF reset count */

	u8 wol_ena : 1;		/* software state of WoL */
	u32 wakeup_reason;	/* last wakeup reason */
	struct ice_hw_port_stats stats;
	struct ice_hw_port_stats stats_prev;
	struct ice_hw hw;
	u8 stat_prev_loaded:1;	/* has previous stats been loaded */
	u8 rdma_mode;
	u16 dcbx_cap;
	/* Tx-hang recovery bookkeeping */
	u32 tx_timeout_count;
	unsigned long tx_timeout_last_recovery;
	u32 tx_timeout_recovery_level;
	char int_name[ICE_INT_NAME_STR_LEN];
	struct auxiliary_device *adev;	/* RDMA auxiliary device */
	int aux_idx;
	u32 sw_int_count;
	/* count of tc_flower filters specific to channel (aka where filter
	 * action is "hw_tc <tc_num>")
	 */
	u16 num_dmac_chnl_fltrs;
	struct hlist_head tc_flower_fltr_list;

	__le64 nvm_phy_type_lo;	/* NVM PHY type low */
	__le64 nvm_phy_type_hi;	/* NVM PHY type high */
	struct ice_link_default_override_tlv link_dflt_override;
	struct ice_lag *lag;	/* Link Aggregation information */

	struct ice_switchdev_info switchdev;

#define ICE_INVALID_AGG_NODE_ID	0
#define ICE_PF_AGG_NODE_ID_START	1
#define ICE_MAX_PF_AGG_NODES		32
	struct ice_agg_node pf_agg_node[ICE_MAX_PF_AGG_NODES];
#define ICE_VF_AGG_NODE_ID_START	65
#define ICE_MAX_VF_AGG_NODES		32
	struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
};
940b61af | 612 | |
3a858ba3 AV |
/* private data attached to each driver-owned netdev (via netdev_priv()) */
struct ice_netdev_priv {
	struct ice_vsi *vsi;
	struct ice_repr *repr;	/* non-NULL for port-representor netdevs */
	/* indirect block callbacks on registered higher level devices
	 * (e.g. tunnel devices)
	 *
	 * tc_indr_block_cb_priv_list is used to look up indirect callback
	 * private data
	 */
	struct list_head tc_indr_block_priv_list;
};
624 | ||
fbc7b27a KP |
625 | /** |
626 | * ice_vector_ch_enabled | |
627 | * @qv: pointer to q_vector, can be NULL | |
628 | * | |
629 | * This function returns true if vector is channel enabled otherwise false | |
630 | */ | |
631 | static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv) | |
632 | { | |
633 | return !!qv->ch; /* Enable it to run with TC */ | |
634 | } | |
635 | ||
940b61af AV |
636 | /** |
637 | * ice_irq_dynamic_ena - Enable default interrupt generation settings | |
f9867df6 AV |
638 | * @hw: pointer to HW struct |
639 | * @vsi: pointer to VSI struct, can be NULL | |
cdedef59 | 640 | * @q_vector: pointer to q_vector, can be NULL |
940b61af | 641 | */ |
c8b7abdd BA |
642 | static inline void |
643 | ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, | |
644 | struct ice_q_vector *q_vector) | |
940b61af | 645 | { |
b07833a0 | 646 | u32 vector = (vsi && q_vector) ? q_vector->reg_idx : |
cbe66bfe | 647 | ((struct ice_pf *)hw->back)->oicr_idx; |
940b61af AV |
648 | int itr = ICE_ITR_NONE; |
649 | u32 val; | |
650 | ||
651 | /* clear the PBA here, as this function is meant to clean out all | |
652 | * previous interrupts and enable the interrupt | |
653 | */ | |
654 | val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | | |
655 | (itr << GLINT_DYN_CTL_ITR_INDX_S); | |
cdedef59 | 656 | if (vsi) |
e97fb1ae | 657 | if (test_bit(ICE_VSI_DOWN, vsi->state)) |
cdedef59 | 658 | return; |
940b61af AV |
659 | wr32(hw, GLINT_DYN_CTL(vector), val); |
660 | } | |
cdedef59 | 661 | |
462acf6a TN |
662 | /** |
663 | * ice_netdev_to_pf - Retrieve the PF struct associated with a netdev | |
664 | * @netdev: pointer to the netdev struct | |
665 | */ | |
666 | static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev) | |
667 | { | |
668 | struct ice_netdev_priv *np = netdev_priv(netdev); | |
669 | ||
670 | return np->vsi->back; | |
671 | } | |
672 | ||
efc2214b MF |
673 | static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi) |
674 | { | |
f9124c68 | 675 | return !!READ_ONCE(vsi->xdp_prog); |
efc2214b MF |
676 | } |
677 | ||
e72bba21 | 678 | static inline void ice_set_ring_xdp(struct ice_tx_ring *ring) |
efc2214b MF |
679 | { |
680 | ring->flags |= ICE_TX_FLAGS_RING_XDP; | |
681 | } | |
682 | ||
/**
 * ice_xsk_pool - get XSK buffer pool bound to a ring
 * @ring: Rx ring to use
 *
 * Returns a pointer to the ring's xsk_buff_pool if XDP is enabled on the
 * VSI and zero-copy was enabled for this queue id, NULL otherwise.
 */
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	u16 qid = ring->q_index;

	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(vsi->netdev, qid);
}
700 | ||
/**
 * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
 * @ring: Tx ring to use
 *
 * Returns a pointer to the ring's xsk_buff_pool if XDP is enabled on the
 * VSI and zero-copy was enabled for the corresponding queue id, NULL
 * otherwise. Tx equivalent of ice_xsk_pool.
 */
static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	u16 qid;

	/* qid is taken relative to vsi->alloc_txq, i.e. XDP Tx rings are
	 * presumably indexed after the regular LAN Tx rings.
	 * NOTE(review): assumes callers only pass XDP Tx rings
	 * (q_index >= alloc_txq); a LAN Tx ring would underflow this u16
	 * subtraction and test an unrelated bit — confirm call sites.
	 */
	qid = ring->q_index - vsi->alloc_txq;

	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(vsi->netdev, qid);
}
720 | ||
c2a23e00 | 721 | /** |
208ff751 AV |
722 | * ice_get_main_vsi - Get the PF VSI |
723 | * @pf: PF instance | |
724 | * | |
725 | * returns pf->vsi[0], which by definition is the PF VSI | |
c2a23e00 | 726 | */ |
208ff751 | 727 | static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf) |
c2a23e00 | 728 | { |
208ff751 AV |
729 | if (pf->vsi) |
730 | return pf->vsi[0]; | |
c2a23e00 BC |
731 | |
732 | return NULL; | |
733 | } | |
734 | ||
7aae80ce WD |
735 | /** |
736 | * ice_get_netdev_priv_vsi - return VSI associated with netdev priv. | |
737 | * @np: private netdev structure | |
738 | */ | |
739 | static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np) | |
740 | { | |
741 | /* In case of port representor return source port VSI. */ | |
742 | if (np->repr) | |
743 | return np->repr->src_vsi; | |
744 | else | |
745 | return np->vsi; | |
746 | } | |
747 | ||
148beb61 HT |
748 | /** |
749 | * ice_get_ctrl_vsi - Get the control VSI | |
750 | * @pf: PF instance | |
751 | */ | |
752 | static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf) | |
753 | { | |
754 | /* if pf->ctrl_vsi_idx is ICE_NO_VSI, control VSI was not set up */ | |
755 | if (!pf->vsi || pf->ctrl_vsi_idx == ICE_NO_VSI) | |
756 | return NULL; | |
757 | ||
758 | return pf->vsi[pf->ctrl_vsi_idx]; | |
759 | } | |
760 | ||
/**
 * ice_is_switchdev_running - check if switchdev is configured
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV
 * and switchdev is configured, false otherwise. This is a plain read of
 * the cached flag; no locking is taken here.
 */
static inline bool ice_is_switchdev_running(struct ice_pf *pf)
{
	return pf->switchdev.is_running;
}
772 | ||
df006dd4 DE |
773 | /** |
774 | * ice_set_sriov_cap - enable SRIOV in PF flags | |
775 | * @pf: PF struct | |
776 | */ | |
777 | static inline void ice_set_sriov_cap(struct ice_pf *pf) | |
778 | { | |
779 | if (pf->hw.func_caps.common_cap.sr_iov_1_1) | |
780 | set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); | |
781 | } | |
782 | ||
/**
 * ice_clear_sriov_cap - disable SRIOV in PF flags
 * @pf: PF struct
 *
 * Unconditionally clears ICE_FLAG_SRIOV_CAPABLE; harmless if the flag
 * was never set.
 */
static inline void ice_clear_sriov_cap(struct ice_pf *pf)
{
	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
}
791 | ||
/* Flow Director statistics counters are carved out of a contiguous
 * counter space in per-PF blocks of ICE_FD_STAT_CTR_BLOCK_COUNT entries;
 * ICE_FD_STAT_PF_IDX() yields the first counter index of a PF's block.
 */
#define ICE_FD_STAT_CTR_BLOCK_COUNT	256
#define ICE_FD_STAT_PF_IDX(base_idx) \
			((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
/* counter 0 of the block — presumably the sideband (ethtool) filter
 * counter, mirroring ICE_FD_SB_STAT_IDX's use in the ethtool FD code;
 * TODO(review): confirm against the .c users.
 */
#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
/* offset within a PF's block used for ADQ channel filter stats */
#define ICE_FD_STAT_CH			1
#define ICE_FD_CH_STAT_IDX(base_idx) \
			(ICE_FD_STAT_PF_IDX(base_idx) + ICE_FD_STAT_CH)
4ab95646 | 799 | |
0754d65b KP |
800 | /** |
801 | * ice_is_adq_active - any active ADQs | |
802 | * @pf: pointer to PF | |
803 | * | |
804 | * This function returns true if there are any ADQs configured (which is | |
805 | * determined by looking at VSI type (which should be VSI_PF), numtc, and | |
806 | * TC_MQPRIO flag) otherwise return false | |
807 | */ | |
808 | static inline bool ice_is_adq_active(struct ice_pf *pf) | |
809 | { | |
810 | struct ice_vsi *vsi; | |
811 | ||
812 | vsi = ice_get_main_vsi(pf); | |
813 | if (!vsi) | |
814 | return false; | |
815 | ||
816 | /* is ADQ configured */ | |
817 | if (vsi->tc_cfg.numtc > ICE_CHNL_START_TC && | |
818 | test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) | |
819 | return true; | |
820 | ||
821 | return false; | |
822 | } | |
823 | ||
df006dd4 | 824 | bool netif_is_ice(struct net_device *dev); |
0e674aeb AV |
825 | int ice_vsi_setup_tx_rings(struct ice_vsi *vsi); |
826 | int ice_vsi_setup_rx_rings(struct ice_vsi *vsi); | |
148beb61 | 827 | int ice_vsi_open_ctrl(struct ice_vsi *vsi); |
1a1c40df | 828 | int ice_vsi_open(struct ice_vsi *vsi); |
fcea6f3d | 829 | void ice_set_ethtool_ops(struct net_device *netdev); |
7aae80ce | 830 | void ice_set_ethtool_repr_ops(struct net_device *netdev); |
462acf6a | 831 | void ice_set_ethtool_safe_mode_ops(struct net_device *netdev); |
8c243700 AV |
832 | u16 ice_get_avail_txq_count(struct ice_pf *pf); |
833 | u16 ice_get_avail_rxq_count(struct ice_pf *pf); | |
87324e74 | 834 | int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx); |
5a4a8673 BA |
835 | void ice_update_vsi_stats(struct ice_vsi *vsi); |
836 | void ice_update_pf_stats(struct ice_pf *pf); | |
c8ff29b5 MS |
837 | void |
838 | ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, | |
839 | struct ice_q_stats stats, u64 *pkts, u64 *bytes); | |
fcea6f3d AV |
840 | int ice_up(struct ice_vsi *vsi); |
841 | int ice_down(struct ice_vsi *vsi); | |
0e674aeb AV |
842 | int ice_vsi_cfg(struct ice_vsi *vsi); |
843 | struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi); | |
22bf877e | 844 | int ice_vsi_determine_xdp_res(struct ice_vsi *vsi); |
efc2214b MF |
845 | int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog); |
846 | int ice_destroy_xdp_rings(struct ice_vsi *vsi); | |
847 | int | |
848 | ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, | |
849 | u32 flags); | |
b66a972a BC |
850 | int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size); |
851 | int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size); | |
852 | int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed); | |
853 | int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed); | |
d76a60ba | 854 | void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); |
87324e74 | 855 | int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset); |
fcea6f3d | 856 | void ice_print_link_msg(struct ice_vsi *vsi, bool isup); |
f9f5301e DE |
857 | int ice_plug_aux_dev(struct ice_pf *pf); |
858 | void ice_unplug_aux_dev(struct ice_pf *pf); | |
d25a0fc4 | 859 | int ice_init_rdma(struct ice_pf *pf); |
0fee3577 | 860 | const char *ice_aq_str(enum ice_aq_err aq_err); |
31765519 | 861 | bool ice_is_wol_supported(struct ice_hw *hw); |
40319796 | 862 | void ice_fdir_del_all_fltrs(struct ice_vsi *vsi); |
28bf2672 BC |
863 | int |
864 | ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, | |
865 | bool is_tun); | |
148beb61 | 866 | void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena); |
cac2a27c HT |
867 | int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd); |
868 | int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd); | |
4ab95646 HT |
869 | int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd); |
870 | int | |
871 | ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd, | |
872 | u32 *rule_locs); | |
40319796 | 873 | void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx); |
148beb61 | 874 | void ice_fdir_release_flows(struct ice_hw *hw); |
83af0039 HT |
875 | void ice_fdir_replay_flows(struct ice_hw *hw); |
876 | void ice_fdir_replay_fltrs(struct ice_pf *pf); | |
148beb61 | 877 | int ice_fdir_create_dflt_rules(struct ice_pf *pf); |
d69ea414 JK |
878 | int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, |
879 | struct ice_rq_event_info *event); | |
0e674aeb | 880 | int ice_open(struct net_device *netdev); |
e95fc857 | 881 | int ice_open_internal(struct net_device *netdev); |
0e674aeb | 882 | int ice_stop(struct net_device *netdev); |
28bf2672 | 883 | void ice_service_task_schedule(struct ice_pf *pf); |
d76a60ba | 884 | |
d25a0fc4 DE |
885 | /** |
886 | * ice_set_rdma_cap - enable RDMA support | |
887 | * @pf: PF struct | |
888 | */ | |
889 | static inline void ice_set_rdma_cap(struct ice_pf *pf) | |
890 | { | |
f9f5301e | 891 | if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) { |
d25a0fc4 | 892 | set_bit(ICE_FLAG_RDMA_ENA, pf->flags); |
5dbbbd01 | 893 | set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags); |
f9f5301e | 894 | } |
d25a0fc4 DE |
895 | } |
896 | ||
/**
 * ice_clear_rdma_cap - disable RDMA support
 * @pf: PF struct
 *
 * Clears ICE_FLAG_RDMA_ENA and unplugs the auxiliary device, unless a
 * plug request is still pending (see below).
 */
static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
	/* We can directly unplug aux device here only if the flag bit
	 * ICE_FLAG_PLUG_AUX_DEV is not set because ice_unplug_aux_dev()
	 * could race with ice_plug_aux_dev() called from
	 * ice_service_task(). In this case we only clear that bit now and
	 * aux device will be unplugged later once ice_plug_aux_dev()
	 * called from ice_service_task() finishes (see ice_service_task()).
	 * test_and_clear_bit() makes the check-and-clear atomic, so only
	 * one of the two paths performs the unplug.
	 */
	if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
		ice_unplug_aux_dev(pf);

	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
837f08fd | 915 | #endif /* _ICE_H_ */ |