/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#include "ice_type.h"

#define ICE_DFLT_IRQ_WORK	256
#define ICE_RXBUF_3072		3072
#define ICE_RXBUF_2048		2048
#define ICE_RXBUF_1536		1536
#define ICE_MAX_CHAINED_RX_BUFS	5
#define ICE_MAX_BUF_TXD		8
#define ICE_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE	4096
#define ICE_MAX_DATA_PER_TXD	(16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
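
/* Worked example (editor's note, not part of the original source):
 * ICE_MAX_DATA_PER_TXD is 16383 (0x3FFF) and ~(4096 - 1) is ~0xFFF, so
 * ICE_MAX_DATA_PER_TXD_ALIGNED = 0x3FFF & ~0xFFF = 0x3000 = 12288, i.e.
 * the largest 4K-aligned payload a single descriptor can carry.
 */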

#define ICE_RX_BUF_WRITE	16	/* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG	128

/* Attempt to maximize the headroom available for incoming frames. We use a 2K
 * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
 * This leaves us with 512 bytes of room. From that we need to deduct the
 * space needed for the shared info and the padding needed to IP align the
 * frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 * up negative. In these cases we should fall back to the legacy
 * receive path.
 */
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + ICE_RXBUF_1536) > SKB_WITH_OVERHEAD(ICE_RXBUF_2048))

/**
 * ice_compute_pad - compute the padding
 * @rx_buf_len: buffer length
 *
 * Figure out the size of half page based on the given buffer length and
 * then subtract the skb_shared_info followed by subtraction of the
 * actual buffer length; this in turn results in the actual space that
 * is left for padding usage
 */
static inline int ice_compute_pad(int rx_buf_len)
{
	int half_page_size;

	half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
}
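
/* Worked example (editor's note, assuming a 4K PAGE_SIZE and that
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) is 320 bytes, which varies
 * by architecture and kernel config): ice_compute_pad(1536) aligns 1536 up
 * to half a page (2048), then SKB_WITH_OVERHEAD(2048) - 1536 =
 * (2048 - 320) - 1536 = 192 bytes left over for headroom padding.
 */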

/**
 * ice_skb_pad - determine the padding that we can supply
 *
 * Figure out the right Rx buffer size and based on that calculate the
 * padding
 */
static inline int ice_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (ICE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = ICE_RXBUF_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ice_compute_pad(rx_buf_len);
}
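
/* On a typical x86_64 build with 4K pages (editor's note, config dependent):
 * NET_SKB_PAD + ICE_RXBUF_1536 = 64 + 1536 = 1600 fits within
 * SKB_WITH_OVERHEAD(ICE_RXBUF_2048), so ICE_2K_TOO_SMALL_WITH_PADDING is
 * false, NET_IP_ALIGN is 0, and ICE_SKB_PAD evaluates to
 * ice_compute_pad(1536), i.e. roughly 192 bytes of headroom.
 */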

#define ICE_SKB_PAD ice_skb_pad()
#else
#define ICE_2K_TOO_SMALL_WITH_PADDING false
#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/* We are assuming that the cache line is always 64 Bytes here for ice.
 * In order to make sure that is a correct assumption there is a check in probe
 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
 * size is 128 bytes. We do it this way because we do not want to read the
 * GLPCI_CNF2 register or a variable containing the value on every pass through
 * the Tx path.
 */
#define ICE_CACHE_LINE_BYTES		64
#define ICE_DESCS_PER_CACHE_LINE	(ICE_CACHE_LINE_BYTES / \
					 sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC		1
#define ICE_DESCS_FOR_SKB_DATA_PTR	1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
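
/* Worked example (editor's note, assuming 16-byte Tx descriptors and the
 * common MAX_SKB_FRAGS of 17 with 4K pages): ICE_DESCS_PER_CACHE_LINE =
 * 64 / 16 = 4, so DESC_NEEDED = 17 + 1 + 4 + 1 = 23 descriptors for a
 * worst-case skb.
 */
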
#define ICE_DESC_UNUSED(R)	\
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	 (R)->next_to_clean - (R)->next_to_use - 1)
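
/* Worked example (editor's note): on a 256-entry ring with
 * next_to_clean = 10 and next_to_use = 250, the macro yields
 * 256 + 10 - 250 - 1 = 15 free descriptors; with next_to_clean = 200 and
 * next_to_use = 100 it yields 0 + 200 - 100 - 1 = 99. One slot is always
 * kept unused so a full ring can be told apart from an empty one.
 */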

#define ICE_TX_FLAGS_TSO	BIT(0)
#define ICE_TX_FLAGS_HW_VLAN	BIT(1)
#define ICE_TX_FLAGS_SW_VLAN	BIT(2)
#define ICE_TX_FLAGS_IPV4	BIT(5)
#define ICE_TX_FLAGS_IPV6	BIT(6)
#define ICE_TX_FLAGS_TUNNEL	BIT(7)
#define ICE_TX_FLAGS_VLAN_M	0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M	0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S	29
#define ICE_TX_FLAGS_VLAN_S	16

#define ICE_XDP_PASS		0
#define ICE_XDP_CONSUMED	BIT(0)
#define ICE_XDP_TX		BIT(1)
#define ICE_XDP_REDIR		BIT(2)

#define ICE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define ICE_ETH_PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)

struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf; /* used for XDP */
	};
	unsigned int bytecount;
	unsigned short gso_segs;
	u32 tx_flags;
	DEFINE_DMA_UNMAP_LEN(len);
	DEFINE_DMA_UNMAP_ADDR(dma);
};

struct ice_tx_offload_params {
	u64 cd_qw1;
	struct ice_ring *tx_ring;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u32 cd_tunnel_params;
	u16 cd_l2tag2;
	u8 header_len;
};

struct ice_rx_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
	union {
		struct {
			struct page *page;
			unsigned int page_offset;
			u16 pagecnt_bias;
		};
		struct {
			void *addr;
			u64 handle;
		};
	};
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt; /* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
	u64 page_reuse_count;
};

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT		= 0,
	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR	ICE_IDX_ITR0
#define ICE_TX_ITR	ICE_IDX_ITR1
#define ICE_ITR_8K	124
#define ICE_ITR_20K	50
#define ICE_ITR_MAX	8160
#define ICE_DFLT_TX_ITR	(ICE_ITR_20K | ICE_ITR_DYNAMIC)
#define ICE_DFLT_RX_ITR	(ICE_ITR_20K | ICE_ITR_DYNAMIC)
#define ICE_ITR_DYNAMIC	0x8000	/* used as flag for itr_setting */
#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC))
#define ITR_TO_REG(setting)	((setting) & ~ICE_ITR_DYNAMIC)
#define ICE_ITR_GRAN_S		1	/* ITR granularity is always 2us */
#define ICE_ITR_GRAN_US		BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK		0x1FFE	/* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	((setting) & ICE_ITR_MASK)
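
/* Example (editor's illustration): a user-visible interval of 50 usecs
 * stored with the dynamic flag, setting = 50 | ICE_ITR_DYNAMIC, gives
 * ITR_IS_DYNAMIC(setting) == 1 and ITR_TO_REG(setting) == 50; the register
 * itself is programmed in 2 usec units, so
 * ITR_REG_ALIGN(ITR_TO_REG(setting)) >> ICE_ITR_GRAN_S == 25.
 */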

#define ICE_ITR_ADAPTIVE_MIN_INC	0x0002
#define ICE_ITR_ADAPTIVE_MIN_USECS	0x0002
#define ICE_ITR_ADAPTIVE_MAX_USECS	0x00FA
#define ICE_ITR_ADAPTIVE_LATENCY	0x8000
#define ICE_ITR_ADAPTIVE_BULK		0x0000

#define ICE_DFLT_INTRL	0
#define ICE_MAX_INTRL	236

#define ICE_WB_ON_ITR_USECS	2
#define ICE_IN_WB_ON_ITR_MODE	255
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
 * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
 * set the write-back latency to the usecs passed in.
 */
#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx)	\
	((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
	  GLINT_DYN_CTL_INTERVAL_M) | \
	 (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
	  GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
	 GLINT_DYN_CTL_WB_ON_ITR_M)
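
/* Illustrative use (editor's sketch of how a caller arms write-back on ITR;
 * not necessarily the driver's exact call site):
 *
 *	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
 *	     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS, ICE_RX_ITR));
 */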

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED	0
#define ICE_TX_LEGACY	1

/* descriptor ring, associated with a VSI */
struct ice_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	union {
		struct ice_tx_buf *tx_buf;
		struct ice_rx_buf *rx_buf;
	};
	/* CL2 - 2nd cacheline starts here */
	u16 q_index;			/* Queue number of ring */
	u16 q_handle;			/* Queue handle per TC */

	u8 ring_active:1;		/* is ring online or not */

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;

	/* stats structs */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};

	struct rcu_head rcu;		/* to avoid race on free */
	struct bpf_prog *xdp_prog;
	struct xdp_umem *xsk_umem;
	struct zero_copy_allocator zca;
	/* CL3 - 3rd cacheline starts here */
	struct xdp_rxq_info xdp_rxq;
	/* CLX - the below items are only accessed infrequently and should be
	 * in their own cache line if possible
	 */
#define ICE_TX_FLAGS_RING_XDP		BIT(0)
#define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
	u8 flags;
	dma_addr_t dma;			/* physical address of ring */
	unsigned int size;		/* length of descriptor ring in bytes */
	u32 txq_teid;			/* Added Tx queue TEID */
	u16 rx_buf_len;
	u8 dcb_tc;			/* Traffic class of ring */
} ____cacheline_internodealigned_in_smp;

static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
{
	return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}

static inline void ice_set_ring_build_skb_ena(struct ice_ring *ring)
{
	ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline void ice_clear_ring_build_skb_ena(struct ice_ring *ring)
{
	ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline bool ice_ring_is_xdp(struct ice_ring *ring)
{
	return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}

struct ice_ring_container {
	/* head of linked-list of rings */
	struct ice_ring *ring;
	unsigned long next_update;	/* jiffies value of next queue update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_pkts;	/* total packets processed this int */
	u16 itr_idx;			/* index in the interrupt vector */
	u16 target_itr;			/* value in usecs divided by the hw->itr_gran */
	u16 current_itr;		/* value in usecs divided by the hw->itr_gran */
	/* high bit set means dynamic ITR, rest is used to store user
	 * readable ITR value in usecs and must be converted before programming
	 * to a register.
	 */
	u16 itr_setting;
};

struct ice_coalesce_stored {
	u16 itr_tx;
	u16 itr_rx;
	u8 intrl;
};

/* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \
	for (pos = (head).ring; pos; pos = pos->next)
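
/* Illustrative only (editor's sketch, not part of the driver API): walk the
 * rings hanging off a container with the iterator above. The helper name
 * ice_ring_count is hypothetical.
 */
static inline unsigned int ice_ring_count(struct ice_ring_container *rc)
{
	struct ice_ring *pos;
	unsigned int n = 0;

	ice_for_each_ring(pos, *rc)
		n++;
	return n;
}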
356
7237f5b0
MF
357static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
358{
359#if (PAGE_SIZE < 8192)
360 if (ring->rx_buf_len > (PAGE_SIZE / 2))
361 return 1;
362#endif
363 return 0;
364}
365
366#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
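
/* Worked example (editor's note): with 4K pages, a 3072-byte Rx buffer is
 * larger than half a page, so ice_rx_pg_order() returns 1 and
 * ice_rx_pg_size() is 8192; a 2048-byte buffer stays at order 0 and a
 * 4096-byte page.
 */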
367
2d4238f5
KK
368union ice_32b_rx_flex_desc;
369
cdedef59 370bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
2b245cb2 371netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
cdedef59
AV
372void ice_clean_tx_ring(struct ice_ring *tx_ring);
373void ice_clean_rx_ring(struct ice_ring *rx_ring);
374int ice_setup_tx_ring(struct ice_ring *tx_ring);
375int ice_setup_rx_ring(struct ice_ring *rx_ring);
376void ice_free_tx_ring(struct ice_ring *tx_ring);
377void ice_free_rx_ring(struct ice_ring *rx_ring);
2b245cb2
AV
378int ice_napi_poll(struct napi_struct *napi, int budget);
379
940b61af 380#endif /* _ICE_TXRX_H_ */