/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#include "ice_type.h"

#define ICE_DFLT_IRQ_WORK	256
#define ICE_RXBUF_3072		3072
#define ICE_RXBUF_2048		2048
#define ICE_RXBUF_1536		1536
#define ICE_MAX_CHAINED_RX_BUFS	5
#define ICE_MAX_BUF_TXD		8
#define ICE_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE	4096
#define ICE_MAX_DATA_PER_TXD	(16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
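
/* Worked example (for illustration only): ICE_MAX_DATA_PER_TXD is
 * 16383 (0x3FFF) and ICE_MAX_READ_REQ_SIZE - 1 is 0xFFF, so the
 * aligned limit is ~0xFFF & 0x3FFF = 0x3000: each descriptor carries
 * at most 12288 bytes once rounded down to a 4K read request boundary.
 */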

#define ICE_RX_BUF_WRITE	16	/* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG	128

/* Attempt to maximize the headroom available for incoming frames. We use a 2K
 * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
 * This leaves us with 512 bytes of room. From that we need to deduct the
 * space needed for the shared info and the padding needed to IP align the
 * frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative. In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
	((NET_SKB_PAD + ICE_RXBUF_1536) > SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
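
/* Illustration (sizes are kconfig/arch dependent, not taken from this
 * file): with 64 byte cache lines NET_SKB_PAD is typically 64 and
 * SKB_WITH_OVERHEAD(2048) is 2048 minus the aligned skb_shared_info,
 * e.g. 2048 - 320 = 1728 on x86_64; 64 + 1536 = 1600 <= 1728, so a 2K
 * buffer still fits. With 256 byte cache lines 256 + 1536 = 1792 >
 * 1728 and the check above becomes true, forcing the 3K buffer path.
 */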

/**
 * ice_compute_pad - compute the padding
 * @rx_buf_len: buffer length
 *
 * Figure out the size of half page based on given buffer length and
 * then subtract the skb_shared_info followed by subtraction of the
 * actual buffer length; this in turn results in the actual space that
 * is left for padding usage
 */
static inline int ice_compute_pad(int rx_buf_len)
{
	int half_page_size;

	half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
}

/**
 * ice_skb_pad - determine the padding that we can supply
 *
 * Figure out the right Rx buffer size and based on that calculate the
 * padding
 */
static inline int ice_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (ICE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = ICE_RXBUF_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ice_compute_pad(rx_buf_len);
}
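
/* Worked example (illustrative, assuming NET_IP_ALIGN == 2 and
 * SKB_WITH_OVERHEAD(2048) == 1728): the 2K path hands
 * rx_buf_len = 1536 - 2 = 1534 to ice_compute_pad(), which rounds up
 * to the 2048 byte half page and returns 1728 - 1534 = 194 bytes of
 * headroom for ICE_SKB_PAD.
 */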

#define ICE_SKB_PAD ice_skb_pad()
#else
#define ICE_2K_TOO_SMALL_WITH_PADDING false
#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/* We are assuming that the cache line is always 64 Bytes here for ice.
 * In order to make sure that is a correct assumption there is a check in probe
 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
 * size is 128 bytes. We do it this way because we do not want to read the
 * GLPCI_CNF2 register or a variable containing the value on every pass through
 * the Tx path.
 */
#define ICE_CACHE_LINE_BYTES		64
#define ICE_DESCS_PER_CACHE_LINE	(ICE_CACHE_LINE_BYTES / \
					 sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC		1
#define ICE_DESCS_FOR_SKB_DATA_PTR	1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
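
/* For example (illustrative): struct ice_tx_desc is 16 bytes, so
 * ICE_DESCS_PER_CACHE_LINE is 4; with the common MAX_SKB_FRAGS of 17
 * the worst case comes to 17 + 1 + 4 + 1 = 23 descriptors per frame.
 */
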
#define ICE_DESC_UNUSED(R)	\
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	 (R)->next_to_clean - (R)->next_to_use - 1)
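
/* Example (illustrative): on a 512 entry ring with next_to_clean == 10
 * and next_to_use == 500 the producer has wrapped, so
 * 512 + 10 - 500 - 1 = 21 descriptors remain unused; with
 * next_to_clean == 500 and next_to_use == 10 the result is
 * 500 - 10 - 1 = 489. The "- 1" keeps one slot back so a full ring is
 * never mistaken for an empty one.
 */
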
#define ICE_TX_FLAGS_TSO	BIT(0)
#define ICE_TX_FLAGS_HW_VLAN	BIT(1)
#define ICE_TX_FLAGS_SW_VLAN	BIT(2)
#define ICE_TX_FLAGS_VLAN_M	0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M	0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S	29
#define ICE_TX_FLAGS_VLAN_S	16
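
/* Sketch (illustrative) of how a hardware-offloaded VLAN tag travels
 * in the upper 16 bits of tx_flags:
 *
 *	tx_flags |= ICE_TX_FLAGS_HW_VLAN;
 *	tx_flags |= (skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S) &
 *		    ICE_TX_FLAGS_VLAN_M;
 */
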
#define ICE_XDP_PASS		0
#define ICE_XDP_CONSUMED	BIT(0)
#define ICE_XDP_TX		BIT(1)
#define ICE_XDP_REDIR		BIT(2)

#define ICE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define ICE_ETH_PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)

struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf; /* used for XDP */
	};
	unsigned int bytecount;
	unsigned short gso_segs;
	u32 tx_flags;
	DEFINE_DMA_UNMAP_LEN(len);
	DEFINE_DMA_UNMAP_ADDR(dma);
};

struct ice_tx_offload_params {
	u64 cd_qw1;
	struct ice_ring *tx_ring;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u32 cd_tunnel_params;
	u16 cd_l2tag2;
	u8 header_len;
};

struct ice_rx_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
	union {
		struct {
			struct page *page;
			unsigned int page_offset;
			u16 pagecnt_bias;
		};
		struct {
			void *addr;
			u64 handle;
		};
	};
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt; /* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
	u64 page_reuse_count;
};

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers or more generally anywhere in the manual
 * mentioning ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT		= 0,
	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR	ICE_IDX_ITR0
#define ICE_TX_ITR	ICE_IDX_ITR1
#define ICE_ITR_8K	124
#define ICE_ITR_20K	50
#define ICE_ITR_MAX	8160
#define ICE_DFLT_TX_ITR	(ICE_ITR_20K | ICE_ITR_DYNAMIC)
#define ICE_DFLT_RX_ITR	(ICE_ITR_20K | ICE_ITR_DYNAMIC)
#define ICE_ITR_DYNAMIC	0x8000	/* used as flag for itr_setting */
#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC))
#define ITR_TO_REG(setting)	((setting) & ~ICE_ITR_DYNAMIC)
#define ICE_ITR_GRAN_S		1	/* ITR granularity is always 2us */
#define ICE_ITR_GRAN_US		BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK		0x1FFE	/* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	((setting) & ICE_ITR_MASK)
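
/* Example (illustrative): a setting of (ICE_ITR_DYNAMIC | 50) is
 * dynamic (ITR_IS_DYNAMIC() is true) and programs as ITR_TO_REG() = 50;
 * ITR_REG_ALIGN(51) clears bit 0 and yields 50, keeping the value on
 * the hardware's 2 usec granularity.
 */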

#define ICE_ITR_ADAPTIVE_MIN_INC	0x0002
#define ICE_ITR_ADAPTIVE_MIN_USECS	0x0002
#define ICE_ITR_ADAPTIVE_MAX_USECS	0x00FA
#define ICE_ITR_ADAPTIVE_LATENCY	0x8000
#define ICE_ITR_ADAPTIVE_BULK		0x0000

#define ICE_DFLT_INTRL	0
#define ICE_MAX_INTRL	236

#define ICE_WB_ON_ITR_USECS	2
#define ICE_IN_WB_ON_ITR_MODE	255
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
 * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
 * set the write-back latency to the usecs passed in.
 */
#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx) \
	((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
	  GLINT_DYN_CTL_INTERVAL_M) | \
	 (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
	  GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
	 GLINT_DYN_CTL_WB_ON_ITR_M)
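
/* Minimal usage sketch (illustrative, not the driver's exact call
 * site):
 *
 *	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
 *	     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
 *					 ICE_RX_ITR));
 */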

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED	0
#define ICE_TX_LEGACY	1

/* descriptor ring, associated with a VSI */
struct ice_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	union {
		struct ice_tx_buf *tx_buf;
		struct ice_rx_buf *rx_buf;
	};
	/* CL2 - 2nd cacheline starts here */
	u16 q_index;			/* Queue number of ring */
	u16 q_handle;			/* Queue handle per TC */

	u8 ring_active:1;		/* is ring online or not */

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;

	/* stats structs */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};

	struct rcu_head rcu;		/* to avoid race on free */
	struct bpf_prog *xdp_prog;
	struct xdp_umem *xsk_umem;
	struct zero_copy_allocator zca;
	/* CL3 - 3rd cacheline starts here */
	struct xdp_rxq_info xdp_rxq;
	/* CLX - the below items are only accessed infrequently and should be
	 * in their own cache line if possible
	 */
#define ICE_TX_FLAGS_RING_XDP		BIT(0)
#define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
	u8 flags;
	dma_addr_t dma;			/* physical address of ring */
	unsigned int size;		/* length of descriptor ring in bytes */
	u32 txq_teid;			/* Added Tx queue TEID */
	u16 rx_buf_len;
	u8 dcb_tc;			/* Traffic class of ring */
} ____cacheline_internodealigned_in_smp;

static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
{
	return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}

static inline void ice_set_ring_build_skb_ena(struct ice_ring *ring)
{
	ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline void ice_clear_ring_build_skb_ena(struct ice_ring *ring)
{
	ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline bool ice_ring_is_xdp(struct ice_ring *ring)
{
	return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}

struct ice_ring_container {
	/* head of linked-list of rings */
	struct ice_ring *ring;
	unsigned long next_update;	/* jiffies value of next queue update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_pkts;	/* total packets processed this int */
	u16 itr_idx;		/* index in the interrupt vector */
	u16 target_itr;		/* value in usecs divided by the hw->itr_gran */
	u16 current_itr;	/* value in usecs divided by the hw->itr_gran */
	/* high bit set means dynamic ITR, rest is used to store user
	 * readable ITR value in usecs and must be converted before programming
	 * to a register.
	 */
	u16 itr_setting;
};

struct ice_coalesce_stored {
	u16 itr_tx;
	u16 itr_rx;
	u8 intrl;
};

/* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \
	for (pos = (head).ring; pos; pos = pos->next)
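
/* Usage sketch (illustrative; ice_clean_tx_irq() here stands in for
 * any per-ring handler, as in the NAPI poll loop):
 *
 *	struct ice_ring *ring;
 *
 *	ice_for_each_ring(ring, q_vector->tx)
 *		clean_complete &= ice_clean_tx_irq(vsi, ring, budget);
 */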
353 | ||
7237f5b0 MF |
354 | static inline unsigned int ice_rx_pg_order(struct ice_ring *ring) |
355 | { | |
356 | #if (PAGE_SIZE < 8192) | |
357 | if (ring->rx_buf_len > (PAGE_SIZE / 2)) | |
358 | return 1; | |
359 | #endif | |
360 | return 0; | |
361 | } | |

#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
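
/* For example (illustrative): with 4K pages, a ring using
 * ICE_RXBUF_3072 needs more than half a page per buffer, so
 * ice_rx_pg_order() returns 1 and ice_rx_pg_size() is 8192; a ring
 * using ICE_RXBUF_2048 packs two buffers into one order-0 page.
 */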
364 | ||
2d4238f5 KK |
365 | union ice_32b_rx_flex_desc; |
366 | ||
cdedef59 | 367 | bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count); |
2b245cb2 | 368 | netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev); |
cdedef59 AV |
369 | void ice_clean_tx_ring(struct ice_ring *tx_ring); |
370 | void ice_clean_rx_ring(struct ice_ring *rx_ring); | |
371 | int ice_setup_tx_ring(struct ice_ring *tx_ring); | |
372 | int ice_setup_rx_ring(struct ice_ring *rx_ring); | |
373 | void ice_free_tx_ring(struct ice_ring *tx_ring); | |
374 | void ice_free_rx_ring(struct ice_ring *rx_ring); | |
2b245cb2 AV |
375 | int ice_napi_poll(struct napi_struct *napi, int budget); |
376 | ||
940b61af | 377 | #endif /* _ICE_TXRX_H_ */ |