/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#include "ice_type.h"

#define ICE_DFLT_IRQ_WORK	256
#define ICE_RXBUF_3072		3072
#define ICE_RXBUF_2048		2048
#define ICE_RXBUF_1664		1664
#define ICE_RXBUF_1536		1536
#define ICE_MAX_CHAINED_RX_BUFS	5
#define ICE_MAX_BUF_TXD		8
#define ICE_MIN_TX_LEN		17
#define ICE_MAX_FRAME_LEGACY_RX	8320

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE	4096
#define ICE_MAX_DATA_PER_TXD	(16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
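
/* Worked example: ICE_MAX_DATA_PER_TXD is 0x3FFF (16383); masking with
 * ~(ICE_MAX_READ_REQ_SIZE - 1) = ~0xFFF clears the low twelve bits, so
 * ICE_MAX_DATA_PER_TXD_ALIGNED evaluates to 0x3000 (12288). A minimal
 * sketch of chunking a payload into data descriptors follows;
 * ice_txd_count_for() is a hypothetical helper, not the driver's actual
 * accounting:
 *
 *	static inline u16 ice_txd_count_for(unsigned int size)
 *	{
 *		return DIV_ROUND_UP(size, ICE_MAX_DATA_PER_TXD_ALIGNED);
 *	}
 *
 * e.g. a 64KB TSO payload needs DIV_ROUND_UP(65536, 12288) = 6 data
 * descriptors.
 */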

#define ICE_MAX_TXQ_PER_TXQG	128

/* Attempt to maximize the headroom available for incoming frames. We use a 2K
 * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
 * This leaves us with 512 bytes of room. From that we need to deduct the
 * space needed for the shared info and the padding needed to IP align the
 * frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative. In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
	((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
	 SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
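
/* Worked example, assuming a 64-byte cache line and a 64-bit build where
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) comes to roughly 320
 * bytes: NET_SKB_PAD + ICE_RXBUF_1536 = 64 + 1536 = 1600, while
 * SKB_WITH_OVERHEAD(ICE_RXBUF_2048) is about 2048 - 320 = 1728, so the
 * check is false and a padded 2K buffer fits. With a 256-byte cache line,
 * NET_SKB_PAD pushes the sum to 1792 > 1728 and the check becomes true.
 */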

/**
 * ice_compute_pad - compute the padding
 * @rx_buf_len: buffer length
 *
 * Figure out the size of half a page based on the given buffer length and
 * then subtract the skb_shared_info overhead followed by the actual buffer
 * length; this in turn results in the actual space that is left for padding
 * usage.
 */
static inline int ice_compute_pad(int rx_buf_len)
{
	int half_page_size;

	half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
}
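
/* Worked example, same skb_shared_info assumption as above:
 * ice_compute_pad(ICE_RXBUF_1536) on a 4K-page system aligns 1536 up to
 * half a page, 2048; SKB_WITH_OVERHEAD(2048) is about 1728, leaving
 * roughly 1728 - 1536 = 192 bytes for headroom padding.
 */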

/**
 * ice_skb_pad - determine the padding that we can supply
 *
 * Figure out the right Rx buffer size and based on that calculate the
 * padding.
 */
static inline int ice_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (ICE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = ICE_RXBUF_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ice_compute_pad(rx_buf_len);
}

#define ICE_SKB_PAD ice_skb_pad()
#else
#define ICE_2K_TOO_SMALL_WITH_PADDING false
#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
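
/* Usage sketch: ICE_SKB_PAD is the headroom reserved ahead of packet data
 * when an skb is built around an Rx buffer. A minimal sketch, assuming a
 * build_skb()-style Rx path; not the driver's verbatim code:
 *
 *	skb = build_skb(va, truesize);
 *	if (skb)
 *		skb_reserve(skb, ICE_SKB_PAD);
 */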

/* We are assuming that the cache line is always 64 Bytes here for ice.
 * In order to make sure that is a correct assumption there is a check in probe
 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
 * size is 128 bytes. We do it this way because we do not want to read the
 * GLPCI_CNF2 register or a variable containing the value on every pass through
 * the Tx path.
 */
#define ICE_CACHE_LINE_BYTES		64
#define ICE_DESCS_PER_CACHE_LINE	(ICE_CACHE_LINE_BYTES / \
					 sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC		1
#define ICE_DESCS_FOR_SKB_DATA_PTR	1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
#define ICE_DESC_UNUSED(R) \
	(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	      (R)->next_to_clean - (R)->next_to_use - 1)

#define ICE_RX_DESC_UNUSED(R) \
	((((R)->first_desc > (R)->next_to_use) ? 0 : (R)->count) + \
	 (R)->first_desc - (R)->next_to_use - 1)
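
/* Worked example: on a Tx ring with count = 256, next_to_use = 37 and
 * next_to_clean = 12, the clean index has not wrapped (12 <= 37), so
 * ICE_DESC_UNUSED gives 256 + 12 - 37 - 1 = 230 free descriptors. Had
 * next_to_clean = 200 wrapped past next_to_use = 10, the result would be
 * 0 + 200 - 10 - 1 = 189. ICE_RX_DESC_UNUSED follows the same arithmetic
 * with first_desc in place of next_to_clean.
 */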

#define ICE_RING_QUARTER(R) ((R)->count >> 2)

#define ICE_TX_FLAGS_TSO	BIT(0)
#define ICE_TX_FLAGS_HW_VLAN	BIT(1)
#define ICE_TX_FLAGS_SW_VLAN	BIT(2)
/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be
 * freed instead of returned like skb packets.
 */
#define ICE_TX_FLAGS_DUMMY_PKT	BIT(3)
#define ICE_TX_FLAGS_TSYN	BIT(4)
#define ICE_TX_FLAGS_IPV4	BIT(5)
#define ICE_TX_FLAGS_IPV6	BIT(6)
#define ICE_TX_FLAGS_TUNNEL	BIT(7)
#define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN	BIT(8)
#define ICE_TX_FLAGS_VLAN_M	0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M	0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S	29
#define ICE_TX_FLAGS_VLAN_S	16
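
/* Sketch of packing a VLAN tag into the upper half of tx_flags with the
 * mask/shift pair above; this mirrors the macro layout and is not the
 * driver's exact Tx-offload code:
 *
 *	tx_flags |= (skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S) &
 *		    ICE_TX_FLAGS_VLAN_M;
 *	prio = (tx_flags & ICE_TX_FLAGS_VLAN_PR_M) >> ICE_TX_FLAGS_VLAN_PR_S;
 */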

#define ICE_XDP_PASS		0
#define ICE_XDP_CONSUMED	BIT(0)
#define ICE_XDP_TX		BIT(1)
#define ICE_XDP_REDIR		BIT(2)
#define ICE_XDP_EXIT		BIT(3)
#define ICE_SKB_CONSUMED	ICE_XDP_CONSUMED

#define ICE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
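
/* These attributes skip the automatic CPU cache sync on map/unmap, so the
 * Rx path syncs only the region the hardware actually wrote. A minimal
 * sketch of the pairing; sizes and variables are illustrative:
 *
 *	dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
 *				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
 *	...
 *	dma_sync_single_range_for_cpu(dev, dma, offset, size,
 *				      DMA_FROM_DEVICE);
 */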

#define ICE_ETH_PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)

struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf; /* used for XDP */
	};
	unsigned int bytecount;
	unsigned short gso_segs;
	u32 tx_flags;
	DEFINE_DMA_UNMAP_LEN(len);
	DEFINE_DMA_UNMAP_ADDR(dma);
};
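
/* Usage sketch: the DEFINE_DMA_UNMAP_* members are meant to be accessed
 * through the standard kernel accessors, roughly as below when mapping and
 * unmapping a buffer; not the driver's verbatim Tx code:
 *
 *	dma_unmap_len_set(tx_buf, len, size);
 *	dma_unmap_addr_set(tx_buf, dma, mapping);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma),
 *			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
 */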

struct ice_tx_offload_params {
	u64 cd_qw1;
	struct ice_tx_ring *tx_ring;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u32 cd_tunnel_params;
	u16 cd_l2tag2;
	u8 header_len;
};

struct ice_rx_buf {
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
	unsigned int pgcnt;
	unsigned int act;
	unsigned int pagecnt_bias;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt; /* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
};

struct ice_ring_stats {
	struct rcu_head rcu;	/* to avoid race on free */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};
};

enum ice_ring_state_t {
	ICE_TX_XPS_INIT_DONE,
	ICE_TX_NBITS,
};

/* this enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers or more generally anywhere in the manual
 * mentioning ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT		= 0,
	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR	ICE_IDX_ITR0
#define ICE_TX_ITR	ICE_IDX_ITR1
#define ICE_ITR_8K	124
#define ICE_ITR_20K	50
#define ICE_ITR_MAX	8160 /* 0x1FE0 */
#define ICE_DFLT_TX_ITR	ICE_ITR_20K
#define ICE_DFLT_RX_ITR	ICE_ITR_20K
enum ice_dynamic_itr {
	ITR_STATIC = 0,
	ITR_DYNAMIC = 1
};

#define ITR_IS_DYNAMIC(rc) ((rc)->itr_mode == ITR_DYNAMIC)
#define ICE_ITR_GRAN_S		1	/* ITR granularity is always 2us */
#define ICE_ITR_GRAN_US		BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK		0x1FFE	/* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	((setting) & ICE_ITR_MASK)
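
/* Worked example: ICE_ITR_MASK keeps bits 1-12, so ITR_REG_ALIGN() rounds
 * a microsecond value down to the 2 usec hardware granularity:
 * ITR_REG_ALIGN(53) = 53 & 0x1FFE = 52, while an even value such as 50
 * passes through unchanged.
 */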

#define ICE_DFLT_INTRL	0
#define ICE_MAX_INTRL	236

#define ICE_IN_WB_ON_ITR_MODE	255
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
 * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
 * set the write-back latency to the usecs passed in.
 */
#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx)	\
	((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
	  GLINT_DYN_CTL_INTERVAL_M) | \
	 (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
	  GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
	 GLINT_DYN_CTL_WB_ON_ITR_M)
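
/* Usage sketch: a caller composes the value and writes it to the vector's
 * GLINT_DYN_CTL register, along these lines; the register-index expression
 * is simplified and not lifted from the driver:
 *
 *	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
 *	     ICE_GLINT_DYN_CTL_WB_ON_ITR(0, ICE_IN_WB_ON_ITR_MODE));
 */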

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED	0
#define ICE_TX_LEGACY	1

/* descriptor ring, associated with a VSI */
struct ice_rx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_rx_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	u16 q_index;			/* Queue number of ring */

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 next_to_alloc;
	/* CL2 - 2nd cacheline starts here */
	union {
		struct ice_rx_buf *rx_buf;
		struct xdp_buff **xdp_buf;
	};
	struct xdp_buff xdp;
	/* CL3 - 3rd cacheline starts here */
	struct bpf_prog *xdp_prog;
	u16 rx_offset;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 first_desc;

	/* stats structs */
	struct ice_ring_stats *ring_stats;

	struct rcu_head rcu;		/* to avoid race on free */
	/* CL4 - 4th cacheline starts here */
	struct ice_channel *ch;
	struct ice_tx_ring *xdp_ring;
	struct xsk_buff_pool *xsk_pool;
	dma_addr_t dma;			/* physical address of ring */
	u64 cached_phctime;
	u16 rx_buf_len;
	u8 dcb_tc;			/* Traffic class of ring */
	u8 ptp_rx;
#define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS	BIT(2)
	u8 flags;
	/* CL5 - 5th cacheline starts here */
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;

struct ice_tx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_tx_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	u8 __iomem *tail;
	struct ice_tx_buf *tx_buf;
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	/* CL2 - 2nd cacheline starts here */
	dma_addr_t dma;			/* physical address of ring */
	struct xsk_buff_pool *xsk_pool;
	u16 next_to_use;
	u16 next_to_clean;
	u16 q_handle;			/* Queue handle per TC */
	u16 reg_idx;			/* HW register index of the ring */
	u16 count;			/* Number of descriptors */
	u16 q_index;			/* Queue number of ring */
	u16 xdp_tx_active;
	struct ice_ring_stats *ring_stats;
	/* CL3 - 3rd cacheline starts here */
	struct rcu_head rcu;		/* to avoid race on free */
	DECLARE_BITMAP(xps_state, ICE_TX_NBITS);	/* XPS Config State */
	struct ice_channel *ch;
	struct ice_ptp_tx *tx_tstamps;
	spinlock_t tx_lock;
	u32 txq_teid;			/* Added Tx queue TEID */
	/* CL4 - 4th cacheline starts here */
#define ICE_TX_FLAGS_RING_XDP		BIT(0)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG1	BIT(1)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2	BIT(2)
	u8 flags;
	u8 dcb_tc;			/* Traffic class of ring */
	u8 ptp_tx;
} ____cacheline_internodealigned_in_smp;

static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
{
	return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}

static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
{
	return !!ring->ch;
}

static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{
	return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}
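
/* Usage sketch: callers branch on these accessors rather than testing
 * ring->flags directly; illustrative only, not the driver's exact Rx
 * configuration code:
 *
 *	if (ice_ring_uses_build_skb(rx_ring))
 *		headroom = ICE_SKB_PAD;
 *	else
 *		headroom = 0;
 */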

enum ice_container_type {
	ICE_RX_CONTAINER,
	ICE_TX_CONTAINER,
};

struct ice_ring_container {
	/* head of linked-list of rings */
	union {
		struct ice_rx_ring *rx_ring;
		struct ice_tx_ring *tx_ring;
	};
	struct dim dim;		/* data for net_dim algorithm */
	u16 itr_idx;		/* index in the interrupt vector */
	/* this matches the maximum number of ITR bits, but in usec
	 * values, so it is shifted left one bit (bit zero is ignored)
	 */
	u16 itr_setting:13;
	u16 itr_reserved:2;
	u16 itr_mode:1;
	enum ice_container_type type;
};

struct ice_coalesce_stored {
	u16 itr_tx;
	u16 itr_rx;
	u8 intrl;
	u8 tx_valid;
	u8 rx_valid;
};

/* iterator for handling rings in ring container */
#define ice_for_each_rx_ring(pos, head) \
	for (pos = (head).rx_ring; pos; pos = pos->next)

#define ice_for_each_tx_ring(pos, head) \
	for (pos = (head).tx_ring; pos; pos = pos->next)
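
/* Usage sketch: walk every Tx ring attached to a vector's Tx container;
 * "q_vector->tx" as the container is an assumption for illustration:
 *
 *	struct ice_tx_ring *tx_ring;
 *
 *	ice_for_each_tx_ring(tx_ring, q_vector->tx)
 *		ice_clean_tx_ring(tx_ring);
 */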

static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
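
/* Worked example: on a 4K-page system a ring using ICE_RXBUF_3072 has
 * rx_buf_len > PAGE_SIZE / 2, so ice_rx_pg_order() returns 1 and
 * ice_rx_pg_size() is 8192 (an order-1 page); with ICE_RXBUF_2048 the
 * order stays 0 and the size is 4096.
 */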

union ice_32b_rx_flex_desc;

bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
ice_select_queue(struct net_device *dev, struct sk_buff *skb,
		 struct net_device *sb_dev);
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet);
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */