/* drivers/net/ethernet/mellanox/mlx5/core/en_tx.c */
/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "lib/clock.h"

#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS

#ifndef CONFIG_MLX5_EN_TLS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
                            MLX5E_SQ_NOPS_ROOM)
#else
/* TLS offload requires MLX5E_SQ_STOP_ROOM to have
 * enough room for a resync SKB, a normal SKB and a NOP
 */
#define MLX5E_SQ_STOP_ROOM (2 * MLX5_SEND_WQE_MAX_WQEBBS +\
                            MLX5E_SQ_NOPS_ROOM)
#endif

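/* TX DMA fifo helpers: every mapping created while building a WQE is pushed
 * onto the SQ's dma_fifo so it can be unmapped on completion, or unwound in
 * reverse order if building the WQE fails.
 */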
static inline void mlx5e_tx_dma_unmap(struct device *pdev,
                                      struct mlx5e_sq_dma *dma)
{
        switch (dma->type) {
        case MLX5E_DMA_MAP_SINGLE:
                dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
                break;
        case MLX5E_DMA_MAP_PAGE:
                dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
                break;
        default:
                WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
        }
}

static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
        return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
                                  dma_addr_t addr,
                                  u32 size,
                                  enum mlx5e_dma_map_type map_type)
{
        struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

        dma->addr = addr;
        dma->size = size;
        dma->type = map_type;
}

static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
        int i;

        for (i = 0; i < num_dma; i++) {
                struct mlx5e_sq_dma *last_pushed_dma =
                        mlx5e_dma_get(sq, --sq->dma_fifo_pc);

                mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
        }
}

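/* Map the packet's DSCP field to a user priority through the DCBX dscp2prio
 * table; used by mlx5e_select_queue() when the trust state is DSCP.
 */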
#ifdef CONFIG_MLX5_CORE_EN_DCB
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
        int dscp_cp = 0;

        if (skb->protocol == htons(ETH_P_IP))
                dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
        else if (skb->protocol == htons(ETH_P_IPV6))
                dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

        return priv->dcbx_dp.dscp2prio[dscp_cp];
}
#endif

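/* ndo_select_queue: pick the txq from the channel chosen by the stack's
 * fallback and the user priority taken from DSCP or the VLAN tag.
 */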
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       struct net_device *sb_dev,
                       select_queue_fallback_t fallback)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        int channel_ix = fallback(dev, skb, NULL);
        u16 num_channels;
        int up = 0;

        if (!netdev_get_num_tc(dev))
                return channel_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
        if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
                up = mlx5e_get_dscp_up(priv, skb);
        else
#endif
                if (skb_vlan_tag_present(skb))
                        up = skb_vlan_tag_get_prio(skb);

        /* channel_ix can be larger than num_channels since
         * dev->num_real_tx_queues = num_channels * num_tc
         */
        num_channels = priv->channels.params.num_channels;
        if (channel_ix >= num_channels)
                channel_ix = reciprocal_scale(channel_ix, num_channels);

        return priv->channel_tc2txq[channel_ix][up];
}

static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

        return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
        struct flow_keys keys;

        if (skb_transport_header_was_set(skb))
                return skb_transport_offset(skb);
        else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
                return keys.control.thoff;
        else
                return mlx5e_skb_l2_header_offset(skb);
}

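/* Compute how many header bytes must be copied inline into the WQE according
 * to the SQ's min inline mode; the result is capped by the skb linear length.
 */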
static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
                                        struct sk_buff *skb)
{
        u16 hlen;

        switch (mode) {
        case MLX5_INLINE_MODE_NONE:
                return 0;
        case MLX5_INLINE_MODE_TCP_UDP:
                hlen = eth_get_headlen(skb->data, skb_headlen(skb));
                if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
                        hlen += VLAN_HLEN;
                break;
        case MLX5_INLINE_MODE_IP:
                /* When transport header is set to zero, it means no transport
                 * header. When transport header is set to 0xff's, it means
                 * transport header wasn't set.
                 */
                if (skb_transport_offset(skb)) {
                        hlen = mlx5e_skb_l3_header_offset(skb);
                        break;
                }
                /* fall through */
        case MLX5_INLINE_MODE_L2:
        default:
                hlen = mlx5e_skb_l2_header_offset(skb);
        }
        return min_t(u16, hlen, skb_headlen(skb));
}

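/* Build an 802.1Q tagged copy of the packet headers directly in the WQE
 * inline area: MAC addresses, then the VLAN tag, then the remaining headers.
 */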
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
        struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
        int cpy1_sz = 2 * ETH_ALEN;
        int cpy2_sz = ihs - cpy1_sz;

        memcpy(vhdr, skb->data, cpy1_sz);
        vhdr->h_vlan_proto = skb->vlan_proto;
        vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
        memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}

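/* Set the checksum offload flags in the WQE Ethernet segment for
 * CHECKSUM_PARTIAL skbs, including the inner flags for encapsulated traffic.
 */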
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
                if (skb->encapsulation) {
                        eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
                                          MLX5_ETH_WQE_L4_INNER_CSUM;
                        sq->stats->csum_partial_inner++;
                } else {
                        eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                        sq->stats->csum_partial++;
                }
        } else
                sq->stats->csum_none++;
}

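/* Inline header size for a GSO skb: up to and including the (inner) TCP or
 * UDP header, i.e. the headers replicated for every resulting segment.
 */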
static inline u16
mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
        struct mlx5e_sq_stats *stats = sq->stats;
        u16 ihs;

        if (skb->encapsulation) {
                ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
                stats->tso_inner_packets++;
                stats->tso_inner_bytes += skb->len - ihs;
        } else {
                if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
                        ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
                else
                        ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
                stats->tso_packets++;
                stats->tso_bytes += skb->len - ihs;
        }

        return ihs;
}

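/* DMA-map the remaining linear part and all page fragments of the skb and
 * fill one data segment per mapping; returns the number of mappings pushed,
 * or -ENOMEM after unwinding them on a mapping failure.
 */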
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                        unsigned char *skb_data, u16 headlen,
                        struct mlx5_wqe_data_seg *dseg)
{
        dma_addr_t dma_addr = 0;
        u8 num_dma = 0;
        int i;

        if (headlen) {
                dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
                                          DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        goto dma_unmap_wqe_err;

                dseg->addr = cpu_to_be64(dma_addr);
                dseg->lkey = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(headlen);

                mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
                num_dma++;
                dseg++;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                int fsz = skb_frag_size(frag);

                dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                                            DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        goto dma_unmap_wqe_err;

                dseg->addr = cpu_to_be64(dma_addr);
                dseg->lkey = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(fsz);

                mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
                num_dma++;
                dseg++;
        }

        return num_dma;

dma_unmap_wqe_err:
        mlx5e_dma_unmap_wqe_err(sq, num_dma);
        return -ENOMEM;
}

static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
                                           struct mlx5_wq_cyc *wq,
                                           u16 pi, u16 nnops)
{
        struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];

        edge_wi = wi + nnops;

        /* fill sq frag edge with nops to avoid wqe wrapping two pages */
        for (; wi < edge_wi; wi++) {
                wi->skb = NULL;
                wi->num_wqebbs = 1;
                mlx5e_post_nop(wq, sq->sqn, &sq->pc);
        }
        sq->stats->nop += nnops;
}

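/* Finalize a WQE: record its metadata in wqe_info, fill the control segment,
 * account the bytes in BQL, stop the queue when the SQ runs out of room, and
 * ring the doorbell unless more skbs are pending (xmit_more).
 */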
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                     u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
                     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
{
        struct mlx5_wq_cyc *wq = &sq->wq;

        wi->num_bytes = num_bytes;
        wi->num_dma = num_dma;
        wi->num_wqebbs = num_wqebbs;
        wi->skb = skb;

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
        cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

        netdev_tx_sent_queue(sq->txq, num_bytes);

        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

        sq->pc += wi->num_wqebbs;
        if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
                netif_tx_stop_queue(sq->txq);
                sq->stats->stopped++;
        }

        if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
                mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

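/* Build and post one send WQE for an skb: compute the inline header size and
 * data segment count, handle the cyclic WQ edge, fill the eth/inline/data
 * segments, DMA-map the payload and finish via mlx5e_txwqe_complete().
 */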
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                          struct mlx5e_tx_wqe *wqe, u16 pi)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5_wqe_ctrl_seg *cseg;
        struct mlx5_wqe_eth_seg *eseg;
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_tx_wqe_info *wi;

        struct mlx5e_sq_stats *stats = sq->stats;
        u16 headlen, ihs, contig_wqebbs_room;
        u16 ds_cnt, ds_cnt_inl = 0;
        u8 num_wqebbs, opcode;
        u32 num_bytes;
        int num_dma;
        __be16 mss;

        /* Calc ihs and ds cnt, no writes to wqe yet */
        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        if (skb_is_gso(skb)) {
                opcode = MLX5_OPCODE_LSO;
                mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
                ihs = mlx5e_tx_get_gso_ihs(sq, skb);
                num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
                stats->packets += skb_shinfo(skb)->gso_segs;
        } else {
                opcode = MLX5_OPCODE_SEND;
                mss = 0;
                ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
                stats->packets++;
        }

        stats->bytes += num_bytes;
        stats->xmit_more += skb->xmit_more;

        headlen = skb->len - ihs - skb->data_len;
        ds_cnt += !!headlen;
        ds_cnt += skb_shinfo(skb)->nr_frags;

        if (ihs) {
                ihs += !!skb_vlan_tag_present(skb) * VLAN_HLEN;

                ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
                ds_cnt += ds_cnt_inl;
        }

        num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
        contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
        if (unlikely(contig_wqebbs_room < num_wqebbs)) {
#ifdef CONFIG_MLX5_EN_IPSEC
                struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
#endif
                mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
                mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
#ifdef CONFIG_MLX5_EN_IPSEC
                wqe->eth = cur_eth;
#endif
        }

        /* fill wqe */
        wi = &sq->db.wqe_info[pi];
        cseg = &wqe->ctrl;
        eseg = &wqe->eth;
        dseg = wqe->data;

        mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

        eseg->mss = mss;

        if (ihs) {
                eseg->inline_hdr.sz = cpu_to_be16(ihs);
                if (skb_vlan_tag_present(skb)) {
                        ihs -= VLAN_HLEN;
                        mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs);
                        stats->added_vlan_packets++;
                } else {
                        memcpy(eseg->inline_hdr.start, skb->data, ihs);
                }
                dseg += ds_cnt_inl;
        } else if (skb_vlan_tag_present(skb)) {
                eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
                if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
                        eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
                eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
                stats->added_vlan_packets++;
        }

        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
        if (unlikely(num_dma < 0))
                goto err_drop;

        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
                             num_dma, wi, cseg);

        return NETDEV_TX_OK;

err_drop:
        stats->dropped++;
        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}

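/* ndo_start_xmit entry point: look up the SQ for the selected queue, let the
 * acceleration handlers (see en_accel) preprocess and possibly consume the
 * skb, then build and post the send WQE.
 */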
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_tx_wqe *wqe;
        struct mlx5e_txqsq *sq;
        u16 pi;

        sq = priv->txq2sq[skb_get_queue_mapping(skb)];
        mlx5e_sq_fetch_wqe(sq, &wqe, &pi);

        /* might send skbs and update wqe and pi */
        skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
        if (unlikely(!skb))
                return NETDEV_TX_OK;

        return mlx5e_sq_xmit(sq, skb, wqe, pi);
}

static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
                                 struct mlx5_err_cqe *err_cqe)
{
        u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq);

        netdev_err(sq->channel->netdev,
                   "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
                   sq->cq.mcq.cqn, ci, sq->sqn,
                   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
                   err_cqe->syndrome, err_cqe->vendor_err_synd);
        mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
}

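/* NAPI TX completion handler: process up to MLX5E_TX_CQ_POLL_BUDGET CQEs,
 * unmap and free the completed skbs, queue the SQ recovery work on error
 * CQEs, and wake the txq if it was stopped and room is available again.
 */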
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
        struct mlx5e_sq_stats *stats;
        struct mlx5e_txqsq *sq;
        struct mlx5_cqe64 *cqe;
        u32 dma_fifo_cc;
        u32 nbytes;
        u16 npkts;
        u16 sqcc;
        int i;

        sq = container_of(cq, struct mlx5e_txqsq, cq);

        if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
                return false;

        cqe = mlx5_cqwq_get_cqe(&cq->wq);
        if (!cqe)
                return false;

        stats = sq->stats;

        npkts = 0;
        nbytes = 0;

        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        sqcc = sq->cc;

        /* avoid dirtying sq cache line every cqe */
        dma_fifo_cc = sq->dma_fifo_cc;

        i = 0;
        do {
                u16 wqe_counter;
                bool last_wqe;

                mlx5_cqwq_pop(&cq->wq);

                wqe_counter = be16_to_cpu(cqe->wqe_counter);

                if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
                        if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
                                              &sq->state)) {
                                mlx5e_dump_error_cqe(sq,
                                                     (struct mlx5_err_cqe *)cqe);
                                if (!IS_ERR_OR_NULL(cq->channel->priv->tx_reporter))
                                        queue_work(cq->channel->priv->wq,
                                                   &sq->recover_work);
                        }
                        stats->cqe_err++;
                }

                do {
                        struct mlx5e_tx_wqe_info *wi;
                        struct sk_buff *skb;
                        u16 ci;
                        int j;

                        last_wqe = (sqcc == wqe_counter);

                        ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
                        wi = &sq->db.wqe_info[ci];
                        skb = wi->skb;

                        if (unlikely(!skb)) { /* nop */
                                sqcc++;
                                continue;
                        }

                        if (unlikely(skb_shinfo(skb)->tx_flags &
                                     SKBTX_HW_TSTAMP)) {
                                struct skb_shared_hwtstamps hwts = {};

                                hwts.hwtstamp =
                                        mlx5_timecounter_cyc2time(sq->clock,
                                                                  get_cqe_ts(cqe));
                                skb_tstamp_tx(skb, &hwts);
                        }

                        for (j = 0; j < wi->num_dma; j++) {
                                struct mlx5e_sq_dma *dma =
                                        mlx5e_dma_get(sq, dma_fifo_cc++);

                                mlx5e_tx_dma_unmap(sq->pdev, dma);
                        }

                        npkts++;
                        nbytes += wi->num_bytes;
                        sqcc += wi->num_wqebbs;
                        napi_consume_skb(skb, napi_budget);
                } while (!last_wqe);

        } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

        stats->cqes += i;

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        sq->dma_fifo_cc = dma_fifo_cc;
        sq->cc = sqcc;

        netdev_tx_completed_queue(sq->txq, npkts, nbytes);

        if (netif_tx_queue_stopped(sq->txq) &&
            mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
                                   MLX5E_SQ_STOP_ROOM) &&
            !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
                netif_tx_wake_queue(sq->txq);
                stats->wake++;
        }

        return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

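/* Drop all WQEs still posted on the SQ: unmap their DMA mappings and free the
 * skbs without generating completions, typically when the SQ is being drained.
 */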
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
        struct mlx5e_tx_wqe_info *wi;
        struct sk_buff *skb;
        u16 ci;
        int i;

        while (sq->cc != sq->pc) {
                ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
                wi = &sq->db.wqe_info[ci];
                skb = wi->skb;

                if (!skb) { /* nop */
                        sq->cc++;
                        continue;
                }

                for (i = 0; i < wi->num_dma; i++) {
                        struct mlx5e_sq_dma *dma =
                                mlx5e_dma_get(sq, sq->dma_fifo_cc++);

                        mlx5e_tx_dma_unmap(sq->pdev, dma);
                }

                dev_kfree_skb_any(skb);
                sq->cc += wi->num_wqebbs;
        }
}

#ifdef CONFIG_MLX5_CORE_IPOIB
static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
                           struct mlx5_wqe_datagram_seg *dseg)
{
        memcpy(&dseg->av, av, sizeof(struct mlx5_av));
        dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
        dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

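/* IPoIB variant of mlx5e_sq_xmit(): same flow, but the WQE also carries a
 * datagram segment with the destination address vector, QPN and Q_Key.
 */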
netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                          struct mlx5_av *av, u32 dqpn, u32 dqkey)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5i_tx_wqe *wqe;

        struct mlx5_wqe_datagram_seg *datagram;
        struct mlx5_wqe_ctrl_seg *cseg;
        struct mlx5_wqe_eth_seg *eseg;
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_tx_wqe_info *wi;

        struct mlx5e_sq_stats *stats = sq->stats;
        u16 headlen, ihs, pi, contig_wqebbs_room;
        u16 ds_cnt, ds_cnt_inl = 0;
        u8 num_wqebbs, opcode;
        u32 num_bytes;
        int num_dma;
        __be16 mss;

        /* Calc ihs and ds cnt, no writes to wqe yet */
        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        if (skb_is_gso(skb)) {
                opcode = MLX5_OPCODE_LSO;
                mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
                ihs = mlx5e_tx_get_gso_ihs(sq, skb);
                num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
                stats->packets += skb_shinfo(skb)->gso_segs;
        } else {
                opcode = MLX5_OPCODE_SEND;
                mss = 0;
                ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
                stats->packets++;
        }

        stats->bytes += num_bytes;
        stats->xmit_more += skb->xmit_more;

        headlen = skb->len - ihs - skb->data_len;
        ds_cnt += !!headlen;
        ds_cnt += skb_shinfo(skb)->nr_frags;

        if (ihs) {
                ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
                ds_cnt += ds_cnt_inl;
        }

        num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
        pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
        if (unlikely(contig_wqebbs_room < num_wqebbs)) {
                mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
                pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        }

        mlx5i_sq_fetch_wqe(sq, &wqe, pi);

        /* fill wqe */
        wi = &sq->db.wqe_info[pi];
        cseg = &wqe->ctrl;
        datagram = &wqe->datagram;
        eseg = &wqe->eth;
        dseg = wqe->data;

        mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

        mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

        eseg->mss = mss;

        if (ihs) {
                memcpy(eseg->inline_hdr.start, skb->data, ihs);
                eseg->inline_hdr.sz = cpu_to_be16(ihs);
                dseg += ds_cnt_inl;
        }

        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
        if (unlikely(num_dma < 0))
                goto err_drop;

        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
                             num_dma, wi, cseg);

        return NETDEV_TX_OK;

err_drop:
        stats->dropped++;
        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}
#endif