drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
#include "en/txrx.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec_rxtx.h"
#include "en/ptp.h"
#include <net/ipv6.h>

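/* Error-path helper: pop the last num_dma entries off the SQ's DMA fifo and
 * unmap each one, unwinding the mappings pushed for a WQE that failed to
 * build before the skb is dropped.
 */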
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}

static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else
		return mlx5e_skb_l2_header_offset(skb);
}

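/* Number of header bytes to inline for the configured inline mode, clamped
 * to the linear part of the skb. As an illustration (hypothetical untagged
 * TCPv4 frame): MLX5E_INLINE_MODE_TCP_UDP-style dissection inlines through
 * the L4 header (14 + 20 + 20 = 54 bytes), MLX5_INLINE_MODE_IP through the
 * end of the L3 header, and MLX5_INLINE_MODE_L2 just the L2 header (at
 * least ETH_HLEN + VLAN_HLEN).
 */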
static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
					struct sk_buff *skb)
{
	u16 hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		break;
	case MLX5_INLINE_MODE_IP:
		hlen = mlx5e_skb_l3_header_offset(skb);
		break;
	case MLX5_INLINE_MODE_L2:
	default:
		hlen = mlx5e_skb_l2_header_offset(skb);
	}
	return min_t(u16, hlen, skb_headlen(skb));
}

#define MLX5_UNSAFE_MEMCPY_DISCLAIMER \
	"This copy has been bounds-checked earlier in " \
	"mlx5i_sq_calc_wqe_attr() and intentionally " \
	"crosses a flex array boundary. Since it is " \
	"performance sensitive, splitting the copy is " \
	"undesirable."

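/* Software VLAN insertion into the inline header: copy the two MAC
 * addresses, write the 802.1Q proto/TCI pair, then append the remaining
 * ihs - 2 * ETH_ALEN header bytes. Resulting layout:
 * [dmac 6][smac 6][vlan_proto 2][vlan_TCI 2][rest of the headers].
 */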
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(&vhdr->addrs, skb->data, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	unsafe_memcpy(&vhdr->h_vlan_encapsulated_proto,
		      skb->data + cpy1_sz,
		      cpy2_sz,
		      MLX5_UNSAFE_MEMCPY_DISCLAIMER);
}

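/* Choose checksum-offload flags for the Ethernet segment. IPsec gets first
 * say; otherwise CHECKSUM_PARTIAL skbs request L3 checksumming plus either
 * inner L3/L4 (encapsulated) or outer L4 (plain) checksumming, and
 * TLS-offloaded skbs always request L3/L4 checksums.
 */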
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			    struct mlx5e_accel_tx_state *accel,
			    struct mlx5_wqe_eth_seg *eseg)
{
	if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg)))
		return;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats->csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats->csum_partial++;
		}
#ifdef CONFIG_MLX5_EN_TLS
	} else if (unlikely(accel && accel->tls.tls_tisn)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
		sq->stats->csum_partial++;
#endif
	} else
		sq->stats->csum_none++;
}

/* Returns the number of header bytes that we plan
 * to inline later in the transmit descriptor
 */
static inline u16
mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	u16 ihs;

	*hopbyhop = 0;
	if (skb->encapsulation) {
		ihs = skb_tcp_all_headers(skb);
		stats->tso_inner_packets++;
		stats->tso_inner_bytes += skb->len - ihs;
	} else {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
			ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
		} else {
			ihs = skb_tcp_all_headers(skb);
			if (ipv6_has_hopopt_jumbo(skb)) {
				*hopbyhop = sizeof(struct hop_jumbo_hdr);
				ihs -= sizeof(struct hop_jumbo_hdr);
			}
		}
		stats->tso_packets++;
		stats->tso_bytes += skb->len - ihs - *hopbyhop;
	}

	return ihs;
}

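/* DMA-map the remaining linear headroom (if any) and every page fragment,
 * filling one data segment per mapping and pushing each mapping onto the
 * SQ's DMA fifo so that completion handling can unmap it later. Returns the
 * number of mappings created, or -ENOMEM after unwinding them all.
 */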
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8 num_dma = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;

dma_unmap_wqe_err:
	mlx5e_dma_unmap_wqe_err(sq, num_dma);
	return -ENOMEM;
}

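/* Per-packet send attributes (mlx5e_tx_attr) and the WQE layout derived from
 * them (mlx5e_tx_wqe_attr): ds_cnt counts 16-byte data segments, ds_cnt_inl
 * those used by inlined headers, ds_cnt_ids those used by accel metadata,
 * and num_wqebbs the resulting number of 64-byte WQE basic blocks.
 */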
struct mlx5e_tx_attr {
	u32 num_bytes;
	u16 headlen;
	u16 ihs;
	__be16 mss;
	u16 insz;
	u8 opcode;
	u8 hopbyhop;
};

struct mlx5e_tx_wqe_attr {
	u16 ds_cnt;
	u16 ds_cnt_inl;
	u16 ds_cnt_ids;
	u8 num_wqebbs;
};

static u8
mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			 struct mlx5e_accel_tx_state *accel)
{
	u8 mode;

#ifdef CONFIG_MLX5_EN_TLS
	if (accel && accel->tls.tls_tisn)
		return MLX5_INLINE_MODE_TCP_UDP;
#endif

	mode = sq->min_inline_mode;

	if (skb_vlan_tag_present(skb) &&
	    test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
		mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);

	return mode;
}

static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
				  struct mlx5e_accel_tx_state *accel,
				  struct mlx5e_tx_attr *attr)
{
	struct mlx5e_sq_stats *stats = sq->stats;

	if (skb_is_gso(skb)) {
		int hopbyhop;
		u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb, &hopbyhop);

		*attr = (struct mlx5e_tx_attr) {
			.opcode = MLX5_OPCODE_LSO,
			.mss = cpu_to_be16(skb_shinfo(skb)->gso_size),
			.ihs = ihs,
			.num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
			.headlen = skb_headlen(skb) - ihs - hopbyhop,
			.hopbyhop = hopbyhop,
		};

		stats->packets += skb_shinfo(skb)->gso_segs;
	} else {
		u8 mode = mlx5e_tx_wqe_inline_mode(sq, skb, accel);
		u16 ihs = mlx5e_calc_min_inline(mode, skb);

		*attr = (struct mlx5e_tx_attr) {
			.opcode = MLX5_OPCODE_SEND,
			.mss = cpu_to_be16(0),
			.ihs = ihs,
			.num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN),
			.headlen = skb_headlen(skb) - ihs,
		};

		stats->packets++;
	}

	attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
	stats->bytes += attr->num_bytes;
}

static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_attr *attr,
				   struct mlx5e_tx_wqe_attr *wqe_attr)
{
	u16 ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT;
	u16 ds_cnt_inl = 0;
	u16 ds_cnt_ids = 0;

	if (attr->insz)
		ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
					  MLX5_SEND_WQE_DS);

	ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags + ds_cnt_ids;
	if (attr->ihs) {
		u16 inl = attr->ihs - INL_HDR_START_SZ;

		if (skb_vlan_tag_present(skb))
			inl += VLAN_HLEN;

		ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	*wqe_attr = (struct mlx5e_tx_wqe_attr) {
		.ds_cnt = ds_cnt,
		.ds_cnt_inl = ds_cnt_inl,
		.ds_cnt_ids = ds_cnt_ids,
		.num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
	};
}
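
/* A worked example for mlx5e_sq_calc_wqe_attr() (hypothetical numbers): a
 * non-GSO skb with 54 inlined header bytes, no VLAN tag, leftover linear
 * data and two page frags gives ds_cnt = 2 (ctrl + eth) + 1 (headlen) +
 * 2 (frags) + DIV_ROUND_UP(54 - 2, 16) = 4 (inline), i.e. 9 segments, so
 * num_wqebbs = DIV_ROUND_UP(9, 4) = 3.
 */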

static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb)
{
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}

static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
{
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats->stopped++;
	}
}

static void mlx5e_tx_flush(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	/* Must not be called when a MPWQE session is active but empty. */
	mlx5e_tx_mpwqe_ensure_complete(sq);

	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wi = &sq->db.wqe_info[pi];

	*wi = (struct mlx5e_tx_wqe_info) {
		.num_wqebbs = 1,
	};

	wqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
}

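/* Finalize a WQE: record bookkeeping in wqe_info, fill the control segment's
 * opcode/producer index and segment count, advance the producer counter,
 * stop the queue if it is out of room, and ring the doorbell unless
 * xmit_more promises more packets. PTP SQs also keep an extra skb reference
 * in a fifo until the hardware timestamp CQE arrives.
 */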
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     const struct mlx5e_tx_attr *attr,
		     const struct mlx5e_tx_wqe_attr *wqe_attr, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
		     bool xmit_more)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	bool send_doorbell;

	*wi = (struct mlx5e_tx_wqe_info) {
		.skb = skb,
		.num_bytes = attr->num_bytes,
		.num_dma = num_dma,
		.num_wqebbs = wqe_attr->num_wqebbs,
		.num_fifo_pkts = 0,
	};

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);

	mlx5e_tx_skb_update_hwts_flags(skb);

	sq->pc += wi->num_wqebbs;

	mlx5e_tx_check_stop(sq);

	if (unlikely(sq->ptpsq)) {
		mlx5e_skb_cb_hwtstamp_init(skb);
		mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb);
		skb_get(skb);
	}

	send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more);
	if (send_doorbell)
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}

static void
mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		  const struct mlx5e_tx_attr *attr, const struct mlx5e_tx_wqe_attr *wqe_attr,
		  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;
	u16 ihs = attr->ihs;
	struct ipv6hdr *h6;
	struct mlx5e_sq_stats *stats = sq->stats;
	int num_dma;

	stats->xmit_more += xmit_more;

	/* fill wqe */
	wi = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg = wqe->data;

	eseg->mss = attr->mss;

	if (ihs) {
		u8 *start = eseg->inline_hdr.start;

		if (unlikely(attr->hopbyhop)) {
			/* remove the HBH header.
			 * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
			 */
			if (skb_vlan_tag_present(skb)) {
				mlx5e_insert_vlan(start, skb, ETH_HLEN + sizeof(*h6));
				ihs += VLAN_HLEN;
				h6 = (struct ipv6hdr *)(start + sizeof(struct vlan_ethhdr));
			} else {
				unsafe_memcpy(start, skb->data,
					      ETH_HLEN + sizeof(*h6),
					      MLX5_UNSAFE_MEMCPY_DISCLAIMER);
				h6 = (struct ipv6hdr *)(start + ETH_HLEN);
			}
			h6->nexthdr = IPPROTO_TCP;
			/* Copy the TCP header after the IPv6 one */
			memcpy(h6 + 1,
			       skb->data + ETH_HLEN + sizeof(*h6) +
			       sizeof(struct hop_jumbo_hdr),
			       tcp_hdrlen(skb));
			/* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
		} else if (skb_vlan_tag_present(skb)) {
			mlx5e_insert_vlan(start, skb, ihs);
			ihs += VLAN_HLEN;
			stats->added_vlan_packets++;
		} else {
			unsafe_memcpy(eseg->inline_hdr.start, skb->data,
				      attr->ihs,
				      MLX5_UNSAFE_MEMCPY_DISCLAIMER);
		}
		eseg->inline_hdr.sz |= cpu_to_be16(ihs);
		dseg += wqe_attr->ds_cnt_inl;
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
			eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
		stats->added_vlan_packets++;
	}

	dseg += wqe_attr->ds_cnt_ids;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs + attr->hopbyhop,
					  attr->headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, xmit_more);

	return;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);
	mlx5e_tx_flush(sq);
}

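/* Multi-packet WQE (MPWQE) TX path: consecutive packets that share an
 * identical Ethernet segment are batched into a single ENHANCED_MPSW work
 * request, one data segment per packet, amortizing control segments and
 * doorbells. Only linear, untagged skbs with no inlined headers and no
 * accel metadata are eligible.
 */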
static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
{
	return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs &&
	       !attr->insz;
}

static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;

	/* Assumes the session is already running and has at least one packet. */
	return !memcmp(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);
}

static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
					 struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = mlx5e_txqsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	net_prefetchw(wqe->data);

	*session = (struct mlx5e_tx_mpwqe) {
		.wqe = wqe,
		.bytes_count = 0,
		.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
		.pkt_count = 0,
		.inline_on = 0,
	};

	memcpy(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);

	sq->stats->mpwqe_blks++;
}

static bool mlx5e_tx_mpwqe_session_is_active(struct mlx5e_txqsq *sq)
{
	return sq->mpwqe.wqe;
}

static void mlx5e_tx_mpwqe_add_dseg(struct mlx5e_txqsq *sq, struct mlx5e_xmit_data *txd)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5_wqe_data_seg *dseg;

	dseg = (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;

	session->pkt_count++;
	session->bytes_count += txd->len;

	dseg->addr = cpu_to_be64(txd->dma_addr);
	dseg->byte_count = cpu_to_be32(txd->len);
	dseg->lkey = sq->mkey_be;
	session->ds_count++;

	sq->stats->mpwqe_pkts++;
}

static struct mlx5_wqe_ctrl_seg *mlx5e_tx_mpwqe_session_complete(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	u8 ds_count = session->ds_count;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_tx_wqe_info *wi;
	u16 pi;

	cseg = &session->wqe->ctrl;
	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);

	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wi = &sq->db.wqe_info[pi];
	*wi = (struct mlx5e_tx_wqe_info) {
		.skb = NULL,
		.num_bytes = session->bytes_count,
		.num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS),
		.num_dma = session->pkt_count,
		.num_fifo_pkts = session->pkt_count,
	};

	sq->pc += wi->num_wqebbs;

	session->wqe = NULL;

	mlx5e_tx_check_stop(sq);

	return cseg;
}

static void
mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		    struct mlx5_wqe_eth_seg *eseg, bool xmit_more)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_xmit_data txd;

	txd.data = skb->data;
	txd.len = skb->len;

	txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
		goto err_unmap;

	if (!mlx5e_tx_mpwqe_session_is_active(sq)) {
		mlx5e_tx_mpwqe_session_start(sq, eseg);
	} else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) {
		mlx5e_tx_mpwqe_session_complete(sq);
		mlx5e_tx_mpwqe_session_start(sq, eseg);
	}

	sq->stats->xmit_more += xmit_more;

	mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
	mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);
	mlx5e_tx_mpwqe_add_dseg(sq, &txd);
	mlx5e_tx_skb_update_hwts_flags(skb);

	if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {
		/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
		cseg = mlx5e_tx_mpwqe_session_complete(sq);

		if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more))
			mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	} else if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more)) {
		/* Might stop the queue, but we were asked to ring the doorbell anyway. */
		cseg = mlx5e_tx_mpwqe_session_complete(sq);

		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	}

	return;

err_unmap:
	mlx5e_dma_unmap_wqe_err(sq, 1);
	sq->stats->dropped++;
	dev_kfree_skb_any(skb);
	mlx5e_tx_flush(sq);
}

void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
{
	/* Unlikely in non-MPWQE workloads; not important in MPWQE workloads. */
	if (unlikely(mlx5e_tx_mpwqe_session_is_active(sq)))
		mlx5e_tx_mpwqe_session_complete(sq);
}

static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
				   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
				   struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
	mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
	mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
}

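/* ndo_start_xmit entry point: resolve the SQ from the skb's queue mapping,
 * let the accel offloads (e.g. TLS/IPsec) run first, then transmit via the
 * MPWQE batching path when eligible or build a regular SEND/LSO WQE.
 */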
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_accel_tx_state accel = {};
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5e_tx_wqe *wqe;
	struct mlx5e_txqsq *sq;
	u16 pi;

	/* All changes to txq2sq are performed in sync with mlx5e_xmit, when the
	 * queue being changed is disabled, and smp_wmb guarantees that the
	 * changes are visible before mlx5e_xmit tries to read from txq2sq. It
	 * guarantees that the value of txq2sq[qid] doesn't change while
	 * mlx5e_xmit is running on queue number qid. smp_wmb is paired with
	 * HARD_TX_LOCK around ndo_start_xmit, which serves as an ACQUIRE.
	 */
	sq = priv->txq2sq[skb_get_queue_mapping(skb)];
	if (unlikely(!sq)) {
		/* Two cases when sq can be NULL:
		 * 1. The HTB node is registered, and mlx5e_select_queue
		 * selected its queue ID, but the SQ itself is not yet created.
		 * 2. HTB SQ creation failed. Similar to the previous case, but
		 * the SQ won't be created.
		 */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* May send SKBs and WQEs. */
	if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
		return NETDEV_TX_OK;

	mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);

	if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state)) {
		if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
			struct mlx5_wqe_eth_seg eseg = {};

			mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg, attr.ihs);
			mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
			return NETDEV_TX_OK;
		}

		mlx5e_tx_mpwqe_ensure_complete(sq);
	}

	mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);

	/* May update the WQE, but may not post other WQEs. */
	mlx5e_accel_tx_finish(sq, wqe, &accel,
			      (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
	mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs);
	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());

	return NETDEV_TX_OK;
}

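/* Simplified transmit path for driver-internal skbs: no accel TX state and
 * no MPWQE batching are involved.
 */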
void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more)
{
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
	mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, &wqe->eth);
	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, xmit_more);
}

static void mlx5e_tx_wi_dma_unmap(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
				  u32 *dma_fifo_cc)
{
	int i;

	for (i = 0; i < wi->num_dma; i++) {
		struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);

		mlx5e_tx_dma_unmap(sq->pdev, dma);
	}
}

static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, int napi_budget)
{
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct skb_shared_hwtstamps hwts = {};
		u64 ts = get_cqe_ts(cqe);

		hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
		if (sq->ptpsq)
			mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
						      hwts.hwtstamp, sq->ptpsq->cq_stats);
		else
			skb_tstamp_tx(skb, &hwts);
	}

	napi_consume_skb(skb, napi_budget);
}

static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
					  struct mlx5_cqe64 *cqe, int napi_budget)
{
	int i;

	for (i = 0; i < wi->num_fifo_pkts; i++) {
		struct sk_buff *skb = mlx5e_skb_fifo_pop(&sq->db.skb_fifo);

		mlx5e_consume_skb(sq, skb, cqe, napi_budget);
	}
}

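/* TX completion handler, run from NAPI: process up to MLX5E_TX_CQ_POLL_BUDGET
 * CQEs, retiring every WQE up to each CQE's wqe_counter, unmapping DMA and
 * consuming (and possibly timestamping) the skbs. Error CQEs kick off the SQ
 * recovery work. Returns true if the budget was exhausted, i.e. more
 * completions may be pending.
 */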
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_txqsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	stats = sq->stats;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	i = 0;
	do {
		struct mlx5e_tx_wqe_info *wi;
		u16 wqe_counter;
		bool last_wqe;
		u16 ci;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			last_wqe = (sqcc == wqe_counter);

			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];

			sqcc += wi->num_wqebbs;

			if (likely(wi->skb)) {
				mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
				mlx5e_consume_skb(sq, wi->skb, cqe, napi_budget);

				npkts++;
				nbytes += wi->num_bytes;
				continue;
			}

			if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi,
									       &dma_fifo_cc)))
				continue;

			if (wi->num_fifo_pkts) {
				mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
				mlx5e_tx_wi_consume_fifo_skbs(sq, wi, cqe, napi_budget);

				npkts += wi->num_fifo_pkts;
				nbytes += wi->num_bytes;
			}
		} while (!last_wqe);

		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
					      &sq->state)) {
				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
						     (struct mlx5_err_cqe *)cqe);
				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
				queue_work(cq->priv->wq, &sq->recover_work);
			}
			stats->cqe_err++;
		}

	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	stats->cqes += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
		netif_tx_wake_queue(sq->txq);
		stats->wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi)
{
	int i;

	for (i = 0; i < wi->num_fifo_pkts; i++)
		dev_kfree_skb_any(mlx5e_skb_fifo_pop(&sq->db.skb_fifo));
}

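/* Drain a deactivated SQ without waiting for completions: walk the ring from
 * cc to pc, unmap DMA and free the skbs, then report the freed bytes/packets
 * to BQL via netdev_tx_completed_queue().
 */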
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	u32 dma_fifo_cc, nbytes = 0;
	u16 ci, sqcc, npkts = 0;

	sqcc = sq->cc;
	dma_fifo_cc = sq->dma_fifo_cc;

	while (sqcc != sq->pc) {
		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
		wi = &sq->db.wqe_info[ci];

		sqcc += wi->num_wqebbs;

		if (likely(wi->skb)) {
			mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
			dev_kfree_skb_any(wi->skb);

			npkts++;
			nbytes += wi->num_bytes;
			continue;
		}

		if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi, &dma_fifo_cc)))
			continue;

		if (wi->num_fifo_pkts) {
			mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
			mlx5e_tx_wi_kfree_fifo_skbs(sq, wi);

			npkts += wi->num_fifo_pkts;
			nbytes += wi->num_bytes;
		}
	}

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
}

#ifdef CONFIG_MLX5_CORE_IPOIB
static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
			   struct mlx5_wqe_datagram_seg *dseg)
{
	memcpy(&dseg->av, av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

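/* IPoIB variant of the WQE attribute calculation: the base segment count
 * comes from struct mlx5i_tx_wqe, which also carries a datagram segment for
 * the address vector, and there is no VLAN or accel metadata to account for.
 */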
static void mlx5i_sq_calc_wqe_attr(struct sk_buff *skb,
				   const struct mlx5e_tx_attr *attr,
				   struct mlx5e_tx_wqe_attr *wqe_attr)
{
	u16 ds_cnt = sizeof(struct mlx5i_tx_wqe) / MLX5_SEND_WQE_DS;
	u16 ds_cnt_inl = 0;

	ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags;

	if (attr->ihs) {
		u16 inl = attr->ihs - INL_HDR_START_SZ;

		ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	*wqe_attr = (struct mlx5e_tx_wqe_attr) {
		.ds_cnt = ds_cnt,
		.ds_cnt_inl = ds_cnt_inl,
		.num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
	};
}

void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		   struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
{
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5i_tx_wqe *wqe;

	struct mlx5_wqe_datagram_seg *datagram;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	int num_dma;
	u16 pi;

	mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
	mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);

	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5I_SQ_FETCH_WQE(sq, pi);

	stats->xmit_more += xmit_more;

	/* fill wqe */
	wi = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	datagram = &wqe->datagram;
	eseg = &wqe->eth;
	dseg = wqe->data;

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);

	eseg->mss = attr.mss;

	if (attr.ihs) {
		if (unlikely(attr.hopbyhop)) {
			struct ipv6hdr *h6;

			/* remove the HBH header.
			 * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
			 */
			unsafe_memcpy(eseg->inline_hdr.start, skb->data,
				      ETH_HLEN + sizeof(*h6),
				      MLX5_UNSAFE_MEMCPY_DISCLAIMER);
			h6 = (struct ipv6hdr *)((char *)eseg->inline_hdr.start + ETH_HLEN);
			h6->nexthdr = IPPROTO_TCP;
			/* Copy the TCP header after the IPv6 one */
			unsafe_memcpy(h6 + 1,
				      skb->data + ETH_HLEN + sizeof(*h6) +
				      sizeof(struct hop_jumbo_hdr),
				      tcp_hdrlen(skb),
				      MLX5_UNSAFE_MEMCPY_DISCLAIMER);
			/* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
		} else {
			unsafe_memcpy(eseg->inline_hdr.start, skb->data,
				      attr.ihs,
				      MLX5_UNSAFE_MEMCPY_DISCLAIMER);
		}
		eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
		dseg += wqe_attr.ds_cnt_inl;
	}

	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs + attr.hopbyhop,
					  attr.headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, xmit_more);

	return;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);
	mlx5e_tx_flush(sq);
}
#endif