Commit | Line | Data |
---|---|---|
e586b3b0 | 1 | /* |
98795158 | 2 | * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved. |
e586b3b0 AV |
3 | * |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | ||
33 | #include <linux/tcp.h> | |
34 | #include <linux/if_vlan.h> | |
e3cfc7e6 | 35 | #include <net/geneve.h> |
fbcb127e | 36 | #include <net/dsfield.h> |
e586b3b0 | 37 | #include "en.h" |
542578c6 | 38 | #include "en/txrx.h" |
4301ba7b | 39 | #include "ipoib/ipoib.h" |
bf239741 | 40 | #include "en_accel/en_accel.h" |
428ffea0 | 41 | #include "en_accel/ipsec_rxtx.h" |
145e5637 | 42 | #include "en/ptp.h" |
e586b3b0 | 43 | |
31391048 | 44 | static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma) |
e586b3b0 | 45 | { |
d4e28cbd AS |
46 | int i; |
47 | ||
34802a42 | 48 | for (i = 0; i < num_dma; i++) { |
d4e28cbd AS |
49 | struct mlx5e_sq_dma *last_pushed_dma = |
50 | mlx5e_dma_get(sq, --sq->dma_fifo_pc); | |
51 | ||
52 | mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma); | |
53 | } | |
e586b3b0 AV |
54 | } |
55 | ||
#ifdef CONFIG_MLX5_CORE_EN_DCB
/* Map the packet's IP DSCP field to a priority through the device's
 * dscp2prio table; non-IP packets map through entry 0.
 */
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
	int dscp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return priv->dcbx_dp.dscp2prio[dscp];
}
#endif
69 | ||
/* Pick the PTP txq for @skb: resolve the packet's user priority (DSCP when
 * the port trust state is DSCP, else VLAN PCP, else 0) and translate it
 * through the PTP tc->real-txq table. With no TCs configured, UP stays 0.
 */
static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int up = 0;

	if (!netdev_get_num_tc(dev))
		goto return_txq;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		/* NB: the "else" above makes this the fallback when DCB
		 * trust is not DSCP (or DCB is compiled out).
		 */
		if (skb_vlan_tag_present(skb))
			up = skb_vlan_tag_get_prio(skb);

return_txq:
	return priv->port_ptp_tc2realtxq[up];
}
89 | ||
214baf22 MM |
90 | static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb, |
91 | u16 htb_maj_id) | |
92 | { | |
93 | u16 classid; | |
94 | ||
95 | if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id) | |
96 | classid = TC_H_MIN(skb->priority); | |
97 | else | |
98 | classid = READ_ONCE(priv->htb.defcls); | |
99 | ||
100 | if (!classid) | |
101 | return 0; | |
102 | ||
103 | return mlx5e_get_txq_by_classid(priv, classid); | |
104 | } | |
105 | ||
/* ndo_select_queue: pick the TX queue for @skb.
 *
 * Fast path (no HTB/PTP queues present): let the stack pick, then remap the
 * choice to the real txq for (channel, user priority) when TCs are enabled.
 * Slow path: try HTB classification and the PTP SQ first, and clamp any
 * stack pick that landed on one of those special queues back into the
 * regular [0, num_tc_x_num_ch) range.
 */
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int num_tc_x_num_ch;
	int txq_ix;
	int up = 0;
	int ch_ix;

	/* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
	num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
	if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
		struct mlx5e_ptp *ptp_channel;

		/* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
		u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);

		if (unlikely(htb_maj_id)) {
			txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id);
			if (txq_ix > 0)
				return txq_ix;
		}

		ptp_channel = READ_ONCE(priv->channels.ptp);
		if (unlikely(ptp_channel &&
			     test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
			     mlx5e_use_ptpsq(skb)))
			return mlx5e_select_ptpsq(dev, skb);

		txq_ix = netdev_pick_tx(dev, skb, NULL);
		/* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
		 * If they are selected, switch to regular queues.
		 * Driver to select these queues only at mlx5e_select_ptpsq()
		 * and mlx5e_select_htb_queue().
		 */
		if (unlikely(txq_ix >= num_tc_x_num_ch))
			txq_ix %= num_tc_x_num_ch;
	} else {
		txq_ix = netdev_pick_tx(dev, skb, NULL);
	}

	if (!netdev_get_num_tc(dev))
		return txq_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		if (skb_vlan_tag_present(skb))
			up = skb_vlan_tag_get_prio(skb);

	/* Normalize any picked txq_ix to [0, num_channels),
	 * So we can return a txq_ix that matches the channel and
	 * packet UP.
	 */
	ch_ix = priv->txq2sq[txq_ix]->ch_ix;

	return priv->channel_tc2realtxq[ch_ix][up];
}
166 | ||
/* L2 inline length heuristic: at least Ethernet + one VLAN tag, or up to
 * the network header if it starts later. Note: the #define below is
 * file-wide once the preprocessor has seen it, not function-scoped.
 */
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}
173 | ||
/* Offset past the L3 header: the transport offset when it is known,
 * otherwise fall back to the L2 heuristic.
 */
static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	return skb_transport_header_was_set(skb) ?
	       skb_transport_offset(skb) :
	       mlx5e_skb_l2_header_offset(skb);
}
181 | ||
6aace17e MS |
182 | static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode, |
183 | struct sk_buff *skb) | |
ae76715d | 184 | { |
6aace17e | 185 | u16 hlen; |
ae76715d HHZ |
186 | |
187 | switch (mode) { | |
a6f402e4 SM |
188 | case MLX5_INLINE_MODE_NONE: |
189 | return 0; | |
ae76715d | 190 | case MLX5_INLINE_MODE_TCP_UDP: |
c43f1255 | 191 | hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb)); |
ae76715d HHZ |
192 | if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb)) |
193 | hlen += VLAN_HLEN; | |
6aace17e | 194 | break; |
ae76715d | 195 | case MLX5_INLINE_MODE_IP: |
3517dfe6 MM |
196 | hlen = mlx5e_skb_l3_header_offset(skb); |
197 | break; | |
ae76715d HHZ |
198 | case MLX5_INLINE_MODE_L2: |
199 | default: | |
6aace17e | 200 | hlen = mlx5e_skb_l2_header_offset(skb); |
ae76715d | 201 | } |
f600c608 | 202 | return min_t(u16, hlen, skb_headlen(skb)); |
ae76715d HHZ |
203 | } |
204 | ||
/* Copy the packet headers into the WQE inline area at @start, inserting a
 * VLAN tag after the MAC addresses. Layout written: dst+src MACs (12 bytes
 * from skb->data), then proto/TCI from the skb metadata, then the remaining
 * ihs - 12 header bytes from the original packet.
 */
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;		/* both MAC addresses */
	int cpy2_sz = ihs - cpy1_sz;		/* rest of the inlined headers */

	memcpy(&vhdr->addrs, skb->data, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}
216 | ||
/* Fill the checksum-offload flags of the WQE Ethernet segment and bump the
 * matching csum counters. IPsec gets first claim on the flags; otherwise
 * CHECKSUM_PARTIAL requests L3 (+inner L3/L4 for encapsulated, else L4)
 * offload, and TLS-offloaded skbs request L3+L4 even without
 * CHECKSUM_PARTIAL.
 */
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			    struct mlx5e_accel_tx_state *accel,
			    struct mlx5_wqe_eth_seg *eseg)
{
	/* IPsec may fully own the csum flags; nothing more to do then. */
	if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg)))
		return;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats->csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats->csum_partial++;
		}
#ifdef CONFIG_MLX5_EN_TLS
	} else if (unlikely(accel && accel->tls.tls_tisn)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
		sq->stats->csum_partial++;
#endif
	} else
		sq->stats->csum_none++;
}
e586b3b0 | 243 | |
77bdf895 | 244 | static inline u16 |
043dc78e | 245 | mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb) |
77bdf895 | 246 | { |
05909bab | 247 | struct mlx5e_sq_stats *stats = sq->stats; |
77bdf895 | 248 | u16 ihs; |
98795158 | 249 | |
77bdf895 SM |
250 | if (skb->encapsulation) { |
251 | ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); | |
05909bab EBE |
252 | stats->tso_inner_packets++; |
253 | stats->tso_inner_bytes += skb->len - ihs; | |
e586b3b0 | 254 | } else { |
689adf0d BP |
255 | if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) |
256 | ihs = skb_transport_offset(skb) + sizeof(struct udphdr); | |
257 | else | |
258 | ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
05909bab EBE |
259 | stats->tso_packets++; |
260 | stats->tso_bytes += skb->len - ihs; | |
e586b3b0 AV |
261 | } |
262 | ||
77bdf895 SM |
263 | return ihs; |
264 | } | |
e586b3b0 | 265 | |
/* DMA-map the skb's linear remainder (@skb_data/@headlen, i.e. past any
 * inlined headers) and all page fragments, filling one data segment per
 * mapping and recording each mapping on the SQ's DMA fifo for completion.
 *
 * Returns the number of data segments written, or -ENOMEM after unwinding
 * every mapping made so far.
 */
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8 num_dma = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		/* Remember the mapping so TX completion can unmap it. */
		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;

dma_unmap_wqe_err:
	/* Undo exactly the num_dma mappings pushed above. */
	mlx5e_dma_unmap_wqe_err(sq, num_dma);
	return -ENOMEM;
}
e586b3b0 | 314 | |
/* Per-packet TX attributes computed once in mlx5e_sq_xmit_prepare(). */
struct mlx5e_tx_attr {
	u32 num_bytes;	/* bytes charged to BQL/stats (incl. GSO headers) */
	u16 headlen;	/* linear bytes left after the inlined headers */
	u16 ihs;	/* inline header size copied into the WQE */
	__be16 mss;	/* MSS for LSO, 0 otherwise */
	u16 insz;	/* accel (e.g. TLS) metadata size to inline */
	u8 opcode;	/* MLX5_OPCODE_SEND or MLX5_OPCODE_LSO */
};

/* Derived WQE geometry, in units of 16-byte data segments (DS). */
struct mlx5e_tx_wqe_attr {
	u16 ds_cnt;	/* total DS count of the WQE */
	u16 ds_cnt_inl;	/* DS consumed by inlined headers */
	u16 ds_cnt_ids;	/* DS consumed by accel metadata */
	u8 num_wqebbs;	/* WQE basic blocks the WQE occupies */
};
d02dfcd5 MM |
330 | |
331 | static u8 | |
8e4b53f6 MM |
332 | mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb, |
333 | struct mlx5e_accel_tx_state *accel) | |
d02dfcd5 MM |
334 | { |
335 | u8 mode; | |
336 | ||
8e4b53f6 MM |
337 | #ifdef CONFIG_MLX5_EN_TLS |
338 | if (accel && accel->tls.tls_tisn) | |
d02dfcd5 | 339 | return MLX5_INLINE_MODE_TCP_UDP; |
8e4b53f6 | 340 | #endif |
d02dfcd5 MM |
341 | |
342 | mode = sq->min_inline_mode; | |
343 | ||
344 | if (skb_vlan_tag_present(skb) && | |
345 | test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state)) | |
346 | mode = max_t(u8, MLX5_INLINE_MODE_L2, mode); | |
347 | ||
348 | return mode; | |
349 | } | |
350 | ||
/* Fill @attr for @skb and update packet/byte stats.
 *
 * GSO: LSO opcode, wire bytes include one replicated header per extra
 * segment. Non-GSO: SEND opcode, bytes padded up to the minimum frame size.
 * @accel may be NULL (no acceleration state, e.g. mlx5e_sq_xmit_simple).
 */
static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
				  struct mlx5e_accel_tx_state *accel,
				  struct mlx5e_tx_attr *attr)
{
	struct mlx5e_sq_stats *stats = sq->stats;

	if (skb_is_gso(skb)) {
		u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb);

		*attr = (struct mlx5e_tx_attr) {
			.opcode = MLX5_OPCODE_LSO,
			.mss = cpu_to_be16(skb_shinfo(skb)->gso_size),
			.ihs = ihs,
			/* headers are sent once per segment on the wire */
			.num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
			.headlen = skb_headlen(skb) - ihs,
		};

		stats->packets += skb_shinfo(skb)->gso_segs;
	} else {
		u8 mode = mlx5e_tx_wqe_inline_mode(sq, skb, accel);
		u16 ihs = mlx5e_calc_min_inline(mode, skb);

		*attr = (struct mlx5e_tx_attr) {
			.opcode = MLX5_OPCODE_SEND,
			.mss = cpu_to_be16(0),
			.ihs = ihs,
			/* account at least a minimum-size Ethernet frame */
			.num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN),
			.headlen = skb_headlen(skb) - ihs,
		};

		stats->packets++;
	}

	attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
	stats->bytes += attr->num_bytes;
}
387 | ||
/* Translate @attr into WQE geometry: count the 16-byte data segments needed
 * for the fixed ctrl/eth part, accel metadata, inlined headers (plus the
 * VLAN tag mlx5e_insert_vlan() will add) and one DS per mapped buffer, then
 * round up to WQE basic blocks.
 */
static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_attr *attr,
				   struct mlx5e_tx_wqe_attr *wqe_attr)
{
	u16 ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT;
	u16 ds_cnt_inl = 0;
	u16 ds_cnt_ids = 0;

	if (attr->insz)
		ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
					  MLX5_SEND_WQE_DS);

	/* one DS for the linear part (if any) + one per page fragment */
	ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags + ds_cnt_ids;
	if (attr->ihs) {
		/* INL_HDR_START_SZ bytes of the headers live in the fixed
		 * part of the eth segment; only the remainder needs extra DS.
		 */
		u16 inl = attr->ihs - INL_HDR_START_SZ;

		if (skb_vlan_tag_present(skb))
			inl += VLAN_HLEN;

		ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	*wqe_attr = (struct mlx5e_tx_wqe_attr) {
		.ds_cnt = ds_cnt,
		.ds_cnt_inl = ds_cnt_inl,
		.ds_cnt_ids = ds_cnt_ids,
		.num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
	};
}
417 | ||
67044a88 MM |
418 | static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb) |
419 | { | |
420 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) | |
421 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | |
422 | } | |
423 | ||
424 | static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq) | |
425 | { | |
426 | if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room))) { | |
427 | netif_tx_stop_queue(sq->txq); | |
428 | sq->stats->stopped++; | |
429 | } | |
430 | } | |
431 | ||
/* Finalize a regular (non-MPWQE) WQE: record completion bookkeeping in @wi,
 * write the ctrl segment, advance the producer counter, stop the queue if
 * needed, and ring the doorbell unless BQL says more packets are coming.
 */
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     const struct mlx5e_tx_attr *attr,
		     const struct mlx5e_tx_wqe_attr *wqe_attr, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
		     bool xmit_more)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	bool send_doorbell;

	*wi = (struct mlx5e_tx_wqe_info) {
		.skb = skb,
		.num_bytes = attr->num_bytes,
		.num_dma = num_dma,
		.num_wqebbs = wqe_attr->num_wqebbs,
		.num_fifo_pkts = 0,
	};

	/* ctrl segment: producer index + opcode, then sqn + DS count */
	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);

	mlx5e_tx_skb_update_hwts_flags(skb);

	sq->pc += wi->num_wqebbs;

	mlx5e_tx_check_stop(sq);

	if (unlikely(sq->ptpsq)) {
		/* PTP port SQ: keep an skb reference on the side fifo so the
		 * CQE-sourced timestamp can be delivered later.
		 */
		mlx5e_skb_cb_hwtstamp_init(skb);
		mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb);
		skb_get(skb);
	}

	send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more);
	if (send_doorbell)
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
469 | ||
/* Build and post one regular WQE for @skb at producer index @pi: fill the
 * eth segment (inline headers or HW VLAN insertion), the accel-metadata
 * gap, and the data segments, then hand off to mlx5e_txwqe_complete().
 * On DMA mapping failure the skb is dropped.
 */
static void
mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		  const struct mlx5e_tx_attr *attr, const struct mlx5e_tx_wqe_attr *wqe_attr,
		  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	int num_dma;

	stats->xmit_more += xmit_more;

	/* fill wqe */
	wi = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg = wqe->data;

	eseg->mss = attr->mss;

	if (attr->ihs) {
		/* inline_hdr.sz is OR-ed: accel code may have set low bits */
		if (skb_vlan_tag_present(skb)) {
			eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs + VLAN_HLEN);
			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, attr->ihs);
			stats->added_vlan_packets++;
		} else {
			eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs);
			memcpy(eseg->inline_hdr.start, skb->data, attr->ihs);
		}
		dseg += wqe_attr->ds_cnt_inl;
	} else if (skb_vlan_tag_present(skb)) {
		/* no inlining: let HW insert the VLAN tag */
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
			eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
		stats->added_vlan_packets++;
	}

	/* skip the DS occupied by accel (e.g. TLS) metadata */
	dseg += wqe_attr->ds_cnt_ids;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
					  attr->headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, xmit_more);

	return;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);
}
525 | ||
5af75c74 MM |
526 | static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr) |
527 | { | |
5be01904 RS |
528 | return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs && |
529 | !attr->insz; | |
5af75c74 MM |
530 | } |
531 | ||
532 | static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg) | |
533 | { | |
534 | struct mlx5e_tx_mpwqe *session = &sq->mpwqe; | |
535 | ||
536 | /* Assumes the session is already running and has at least one packet. */ | |
537 | return !memcmp(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN); | |
538 | } | |
539 | ||
/* Open a new MPWQE session: reserve room for a maximum-size multi-packet
 * WQE, reset the session accounting, and seed the shared eth segment that
 * every packet in the session must match.
 */
static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
					 struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	net_prefetchw(wqe->data);

	*session = (struct mlx5e_tx_mpwqe) {
		.wqe = wqe,
		.bytes_count = 0,
		.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
		.pkt_count = 0,
		.inline_on = 0,
	};

	memcpy(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);

	sq->stats->mpwqe_blks++;
}
563 | ||
564 | static bool mlx5e_tx_mpwqe_session_is_active(struct mlx5e_txqsq *sq) | |
565 | { | |
566 | return sq->mpwqe.wqe; | |
567 | } | |
568 | ||
569 | static void mlx5e_tx_mpwqe_add_dseg(struct mlx5e_txqsq *sq, struct mlx5e_xmit_data *txd) | |
570 | { | |
571 | struct mlx5e_tx_mpwqe *session = &sq->mpwqe; | |
572 | struct mlx5_wqe_data_seg *dseg; | |
573 | ||
574 | dseg = (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count; | |
575 | ||
576 | session->pkt_count++; | |
577 | session->bytes_count += txd->len; | |
578 | ||
579 | dseg->addr = cpu_to_be64(txd->dma_addr); | |
580 | dseg->byte_count = cpu_to_be32(txd->len); | |
581 | dseg->lkey = sq->mkey_be; | |
582 | session->ds_count++; | |
583 | ||
584 | sq->stats->mpwqe_pkts++; | |
585 | } | |
586 | ||
/* Close the open MPWQE session: write the ctrl segment, record completion
 * info (skbs for an MPWQE live on the SQ's skb fifo, hence num_fifo_pkts),
 * advance the producer counter and clear the session. Returns the ctrl
 * segment so the caller can ring the doorbell.
 */
static struct mlx5_wqe_ctrl_seg *mlx5e_tx_mpwqe_session_complete(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	u8 ds_count = session->ds_count;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_tx_wqe_info *wi;
	u16 pi;

	cseg = &session->wqe->ctrl;
	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);

	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wi = &sq->db.wqe_info[pi];
	*wi = (struct mlx5e_tx_wqe_info) {
		.skb = NULL,	/* packets are on the skb fifo instead */
		.num_bytes = session->bytes_count,
		.num_wqebbs = DIV_ROUND_UP(ds_count, MLX5E_SEND_WQEBB_NUM_DS == 0 ?
					   1 : MLX5_SEND_WQEBB_NUM_DS),
		.num_dma = session->pkt_count,
		.num_fifo_pkts = session->pkt_count,
	};

	sq->pc += wi->num_wqebbs;

	session->wqe = NULL;	/* mark the session closed */

	mlx5e_tx_check_stop(sq);

	return cseg;
}
617 | ||
618 | static void | |
619 | mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, | |
620 | struct mlx5_wqe_eth_seg *eseg, bool xmit_more) | |
621 | { | |
622 | struct mlx5_wqe_ctrl_seg *cseg; | |
623 | struct mlx5e_xmit_data txd; | |
624 | ||
625 | if (!mlx5e_tx_mpwqe_session_is_active(sq)) { | |
626 | mlx5e_tx_mpwqe_session_start(sq, eseg); | |
627 | } else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) { | |
628 | mlx5e_tx_mpwqe_session_complete(sq); | |
629 | mlx5e_tx_mpwqe_session_start(sq, eseg); | |
630 | } | |
631 | ||
632 | sq->stats->xmit_more += xmit_more; | |
633 | ||
634 | txd.data = skb->data; | |
635 | txd.len = skb->len; | |
636 | ||
637 | txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE); | |
638 | if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr))) | |
639 | goto err_unmap; | |
640 | mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE); | |
641 | ||
0b676aae | 642 | mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb); |
5af75c74 MM |
643 | |
644 | mlx5e_tx_mpwqe_add_dseg(sq, &txd); | |
645 | ||
646 | mlx5e_tx_skb_update_hwts_flags(skb); | |
647 | ||
648 | if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) { | |
649 | /* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */ | |
650 | cseg = mlx5e_tx_mpwqe_session_complete(sq); | |
651 | ||
652 | if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more)) | |
653 | mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); | |
654 | } else if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more)) { | |
655 | /* Might stop the queue, but we were asked to ring the doorbell anyway. */ | |
656 | cseg = mlx5e_tx_mpwqe_session_complete(sq); | |
657 | ||
658 | mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); | |
659 | } | |
660 | ||
661 | return; | |
662 | ||
663 | err_unmap: | |
664 | mlx5e_dma_unmap_wqe_err(sq, 1); | |
665 | sq->stats->dropped++; | |
666 | dev_kfree_skb_any(skb); | |
667 | } | |
668 | ||
/* Flush any open MPWQE session so subsequent regular WQEs do not interleave
 * with it.
 */
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
{
	/* Unlikely in non-MPWQE workloads; not important in MPWQE workloads. */
	if (unlikely(mlx5e_tx_mpwqe_session_is_active(sq)))
		mlx5e_tx_mpwqe_session_complete(sq);
}
675 | ||
/* Build the WQE eth segment: let the accel path (e.g. TLS/IPsec) fill its
 * fields first, then set the checksum-offload flags on top.
 */
static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
				   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
				   struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
	mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
	mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
}
683 | ||
/* ndo_start_xmit for mlx5e: resolve the SQ, run the accel (TLS/IPsec)
 * begin hook, and transmit via the MPWQE path when eligible, otherwise
 * build a regular WQE.
 */
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_accel_tx_state accel = {};
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5e_tx_wqe *wqe;
	struct mlx5e_txqsq *sq;
	u16 pi;

	sq = priv->txq2sq[skb_get_queue_mapping(skb)];
	if (unlikely(!sq)) {
		/* Queue mapping has no SQ (e.g. HTB class was deleted). */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* May send SKBs and WQEs. */
	if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
		return NETDEV_TX_OK;

	mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);

	if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state)) {
		if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
			struct mlx5_wqe_eth_seg eseg = {};

			mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg, attr.ihs);
			mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
			return NETDEV_TX_OK;
		}

		/* Ineligible skb: close any open session before a plain WQE. */
		mlx5e_tx_mpwqe_ensure_complete(sq);
	}

	mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);

	/* May update the WQE, but may not post other WQEs. */
	mlx5e_accel_tx_finish(sq, wqe, &accel,
			      (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
	mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs);
	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());

	return NETDEV_TX_OK;
}
730 | ||
/* Transmit @skb on @sq without any acceleration state - used by internal
 * senders (e.g. IPoIB-style paths) that bypass mlx5e_xmit().
 */
void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more)
{
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
	mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, &wqe->eth);
	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, xmit_more);
}
745 | ||
746 | static void mlx5e_tx_wi_dma_unmap(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi, | |
747 | u32 *dma_fifo_cc) | |
748 | { | |
749 | int i; | |
750 | ||
751 | for (i = 0; i < wi->num_dma; i++) { | |
752 | struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++); | |
753 | ||
754 | mlx5e_tx_dma_unmap(sq->pdev, dma); | |
755 | } | |
756 | } | |
757 | ||
/* Release one completed skb: if HW timestamping was requested, convert the
 * CQE timestamp to nanoseconds and deliver it (via the PTP handler on a
 * PTP SQ, else directly), then free the skb through the NAPI cache.
 */
static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, int napi_budget)
{
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct skb_shared_hwtstamps hwts = {};
		u64 ts = get_cqe_ts(cqe);

		hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
		if (sq->ptpsq)
			/* PTP SQ correlates CQE and port timestamps itself */
			mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
						      hwts.hwtstamp, sq->ptpsq->cq_stats);
		else
			skb_tstamp_tx(skb, &hwts);
	}

	napi_consume_skb(skb, napi_budget);
}
775 | ||
338c46c6 MM |
776 | static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi, |
777 | struct mlx5_cqe64 *cqe, int napi_budget) | |
778 | { | |
779 | int i; | |
780 | ||
781 | for (i = 0; i < wi->num_fifo_pkts; i++) { | |
0b676aae | 782 | struct sk_buff *skb = mlx5e_skb_fifo_pop(&sq->db.skb_fifo); |
338c46c6 MM |
783 | |
784 | mlx5e_consume_skb(sq, skb, cqe, napi_budget); | |
785 | } | |
786 | } | |
787 | ||
/* Poll the TX completion queue and complete the finished send WQEs.
 *
 * Walks up to MLX5E_TX_CQ_POLL_BUDGET CQEs; each CQE may complete a batch of
 * WQEs (everything up to and including cqe->wqe_counter).  Updates BQL and
 * wakes the netdev TX queue when room becomes available.
 *
 * Returns true when the full poll budget was consumed, i.e. more completions
 * may be pending.
 */
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_txqsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	/* Nothing to do for a disabled (e.g. deactivated/recovering) SQ. */
	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	stats = sq->stats;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	i = 0;
	do {
		struct mlx5e_tx_wqe_info *wi;
		u16 wqe_counter;
		bool last_wqe;
		u16 ci;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		/* One CQE completes every WQE up to wqe_counter (inclusive). */
		do {
			last_wqe = (sqcc == wqe_counter);

			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];

			sqcc += wi->num_wqebbs;

			/* Common case: one SKB per WQE. */
			if (likely(wi->skb)) {
				mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
				mlx5e_consume_skb(sq, wi->skb, cqe, napi_budget);

				npkts++;
				nbytes += wi->num_bytes;
				continue;
			}

			/* No SKB: may be a kTLS resync dump WQE, which only
			 * needs its DMA mappings released.
			 */
			if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi,
									       &dma_fifo_cc)))
				continue;

			/* Otherwise a multi-packet WQE: its SKBs were pushed
			 * to the SQ's skb FIFO at xmit time.
			 */
			if (wi->num_fifo_pkts) {
				mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
				mlx5e_tx_wi_consume_fifo_skbs(sq, wi, cqe, napi_budget);

				npkts += wi->num_fifo_pkts;
				nbytes += wi->num_bytes;
			}
		} while (!last_wqe);

		/* On a requester error CQE: dump diagnostics and kick off SQ
		 * recovery, but only once (RECOVERING bit gates the report).
		 */
		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
					      &sq->state)) {
				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
						     (struct mlx5_err_cqe *)cqe);
				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
				queue_work(cq->priv->wq, &sq->recover_work);
			}
			stats->cqe_err++;
		}

	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	stats->cqes += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	/* Report completed work to BQL. */
	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
		netif_tx_wake_queue(sq->txq);
		stats->wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
6e8dd6d6 | 896 | |
338c46c6 MM |
897 | static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi) |
898 | { | |
899 | int i; | |
900 | ||
901 | for (i = 0; i < wi->num_fifo_pkts; i++) | |
0b676aae | 902 | dev_kfree_skb_any(mlx5e_skb_fifo_pop(&sq->db.skb_fifo)); |
338c46c6 MM |
903 | } |
904 | ||
/* Release all in-flight descriptors of an SQ without waiting for completions.
 *
 * Walks the WQE info ring from sq->cc up to sq->pc, unmapping DMA and freeing
 * the associated SKBs, then reports the freed work to BQL.  Mirrors the
 * classification logic of mlx5e_poll_tx_cq (single SKB / kTLS resync dump /
 * skb-FIFO multi-packet WQE), but frees instead of completing.
 */
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	u32 dma_fifo_cc, nbytes = 0;
	u16 ci, sqcc, npkts = 0;

	/* Work on local copies to avoid dirtying the SQ cache line per WQE. */
	sqcc = sq->cc;
	dma_fifo_cc = sq->dma_fifo_cc;

	while (sqcc != sq->pc) {
		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
		wi = &sq->db.wqe_info[ci];

		sqcc += wi->num_wqebbs;

		/* Common case: one SKB per WQE. */
		if (likely(wi->skb)) {
			mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
			dev_kfree_skb_any(wi->skb);

			npkts++;
			nbytes += wi->num_bytes;
			continue;
		}

		/* kTLS resync dump WQE: helper releases its DMA mappings. */
		if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi, &dma_fifo_cc)))
			continue;

		/* Multi-packet WQE: drop all SKBs queued in the skb FIFO. */
		if (wi->num_fifo_pkts) {
			mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
			mlx5e_tx_wi_kfree_fifo_skbs(sq, wi);

			npkts += wi->num_fifo_pkts;
			nbytes += wi->num_bytes;
		}
	}

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	/* Keep BQL accounting consistent with what was freed. */
	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
}
25854544 SM |
946 | |
947 | #ifdef CONFIG_MLX5_CORE_IPOIB | |
25854544 SM |
948 | static inline void |
949 | mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey, | |
950 | struct mlx5_wqe_datagram_seg *dseg) | |
951 | { | |
952 | memcpy(&dseg->av, av, sizeof(struct mlx5_av)); | |
953 | dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV); | |
954 | dseg->av.key.qkey.qkey = cpu_to_be32(dqkey); | |
955 | } | |
956 | ||
8e4b53f6 MM |
957 | static void mlx5i_sq_calc_wqe_attr(struct sk_buff *skb, |
958 | const struct mlx5e_tx_attr *attr, | |
959 | struct mlx5e_tx_wqe_attr *wqe_attr) | |
960 | { | |
961 | u16 ds_cnt = sizeof(struct mlx5i_tx_wqe) / MLX5_SEND_WQE_DS; | |
962 | u16 ds_cnt_inl = 0; | |
963 | ||
964 | ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags; | |
965 | ||
966 | if (attr->ihs) { | |
967 | u16 inl = attr->ihs - INL_HDR_START_SZ; | |
968 | ||
969 | ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS); | |
970 | ds_cnt += ds_cnt_inl; | |
971 | } | |
972 | ||
973 | *wqe_attr = (struct mlx5e_tx_wqe_attr) { | |
974 | .ds_cnt = ds_cnt, | |
975 | .ds_cnt_inl = ds_cnt_inl, | |
976 | .num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS), | |
977 | }; | |
978 | } | |
979 | ||
3df711db MM |
/* Transmit one SKB on an IPoIB SQ.
 *
 * Builds an mlx5i_tx_wqe (ctrl + datagram + eth + data segments) for @skb,
 * addressed by @av/@dqpn/@dqkey, maps the payload for DMA and rings the
 * doorbell via mlx5e_txwqe_complete().  On DMA mapping failure the SKB is
 * dropped and the drop counter incremented.
 */
void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		   struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
{
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5i_tx_wqe *wqe;

	struct mlx5_wqe_datagram_seg *datagram;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	int num_dma;
	u16 pi;

	/* Derive per-packet attributes, then size the WQE accordingly. */
	mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
	mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);

	/* Reserve contiguous WQEBBs and get a pointer to the WQE slot. */
	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5I_SQ_FETCH_WQE(sq, pi);

	stats->xmit_more += xmit_more;

	/* fill wqe */
	wi = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	datagram = &wqe->datagram;
	eseg = &wqe->eth;
	dseg = wqe->data;

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);

	eseg->mss = attr.mss;

	/* Copy inline headers into the eth segment and skip past the data
	 * segments they consume.
	 */
	if (attr.ihs) {
		memcpy(eseg->inline_hdr.start, skb->data, attr.ihs);
		eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
		dseg += wqe_attr.ds_cnt_inl;
	}

	/* DMA-map the remaining linear data and all fragments. */
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs,
					  attr.headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, xmit_more);

	return;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);
}
25854544 | 1037 | #endif |