drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"

#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)

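/* DMA fifo helpers: every buffer mapped for a TX WQE is recorded in
 * sq->db.dma_fifo (address, size and map type), so that the completion path
 * and the error-unwind path can unmap it without re-walking the skb.
 */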
static inline void mlx5e_tx_dma_unmap(struct device *pdev,
				      struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
				  dma_addr_t addr,
				  u32 size,
				  enum mlx5e_dma_map_type map_type)
{
	u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;

	sq->db.dma_fifo[i].addr = addr;
	sq->db.dma_fifo[i].size = size;
	sq->db.dma_fifo[i].type = map_type;
	sq->dma_fifo_pc++;
}

static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}

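/* ndo_select_queue: when traffic classes are configured, combine the channel
 * picked by the kernel fallback with the traffic class derived from the VLAN
 * priority bits, via priv->channel_tc2txq[].
 */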
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int channel_ix = fallback(dev, skb);
	u16 num_channels;
	int up = 0;

	if (!netdev_get_num_tc(dev))
		return channel_ix;

	if (skb_vlan_tag_present(skb))
		up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

	/* channel_ix can be larger than num_channels since
	 * dev->num_real_tx_queues = num_channels * num_tc
	 */
	num_channels = priv->channels.params.num_channels;
	if (channel_ix >= num_channels)
		channel_ix = reciprocal_scale(channel_ix, num_channels);

	return priv->channel_tc2txq[channel_ix][up];
}

static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		return keys.control.thoff;
	else
		return mlx5e_skb_l2_header_offset(skb);
}

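/* Compute how many header bytes must be copied inline into the WQE according
 * to the SQ's minimal inline mode (none, L2, IP or TCP/UDP); the result is
 * capped at skb->len.
 */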
static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
					struct sk_buff *skb)
{
	u16 hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		break;
	case MLX5_INLINE_MODE_IP:
		/* When transport header is set to zero, it means no transport
		 * header. When transport header is set to 0xff's, it means
		 * transport header wasn't set.
		 */
		if (skb_transport_offset(skb)) {
			hlen = mlx5e_skb_l3_header_offset(skb);
			break;
		}
		/* fall through */
	case MLX5_INLINE_MODE_L2:
	default:
		hlen = mlx5e_skb_l2_header_offset(skb);
	}
	return min_t(u16, hlen, skb->len);
}

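/* Inline-header copy helpers: mlx5e_tx_skb_pull_inline() consumes the copied
 * bytes from the local skb data/len cursors, and mlx5e_insert_vlan() inserts
 * a VLAN header between the MAC addresses and the encapsulated protocol while
 * copying the inline headers into the WQE.
 */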
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
					    unsigned int *skb_len,
					    unsigned int len)
{
	*skb_len -= len;
	*skb_data += len;
}

static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
				     unsigned char **skb_data,
				     unsigned int *skb_len)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, *skb_data, cpy1_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}

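/* Set the checksum offload flags in the ethernet segment according to
 * skb->ip_summed, counting inner/outer partial checksums in the SQ stats.
 */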
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats.csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats.csum_partial++;
		}
	} else
		sq->stats.csum_none++;
}

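/* Fill the ethernet segment for LSO: set the MSS and the inline header length
 * (outer or inner transport headers for encapsulated traffic). *num_bytes is
 * set to the total bytes that will go on the wire once the skb is segmented.
 */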
static inline u16
mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			   struct mlx5_wqe_eth_seg *eseg, unsigned int *num_bytes)
{
	u16 ihs;

	eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);

	if (skb->encapsulation) {
		ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
		sq->stats.tso_inner_packets++;
		sq->stats.tso_inner_bytes += skb->len - ihs;
	} else {
		ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
		sq->stats.tso_packets++;
		sq->stats.tso_bytes += skb->len - ihs;
	}

	*num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
	return ihs;
}

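/* DMA-map the linear part and the page fragments of the skb into data
 * segments. Returns the number of mapped segments, or -ENOMEM on a mapping
 * error (the caller unwinds via mlx5e_dma_unmap_wqe_err()).
 */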
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8 num_dma = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			return -ENOMEM;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			return -ENOMEM;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;
}

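/* Finalize the WQE: fill the control segment, record the descriptor in the
 * wqe_info array, stop the txq when the SQ is about to run out of room, ring
 * the doorbell (unless more packets are pending via xmit_more), and pad the
 * end of the work queue with NOPs so that no WQE wraps around the queue edge.
 */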
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     u8 opcode, u16 ds_cnt, u32 num_bytes, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi;

	wi->num_bytes = num_bytes;
	wi->num_dma = num_dma;
	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	wi->skb = skb;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	netdev_tx_sent_queue(sq->txq, num_bytes);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	sq->pc += wi->num_wqebbs;
	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.stopped++;
	}

	if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);

	/* fill sq edge with nops to avoid wqe wrap around */
	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
		sq->db.wqe_info[pi].skb = NULL;
		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		sq->stats.nop++;
	}
}

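/* Build and post a single TX WQE for an Ethernet skb: checksum/LSO ethernet
 * segment, optional inline headers or HW VLAN insertion, and the DMA data
 * segments.
 */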
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
				 struct mlx5e_tx_wqe *wqe, u16 pi)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;

	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;
	u8  opcode = MLX5_OPCODE_SEND;
	unsigned int num_bytes;
	int num_dma;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;

	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	if (skb_is_gso(skb)) {
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
		sq->stats.packets += skb_shinfo(skb)->gso_segs;
	} else {
		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		sq->stats.packets++;
	}
	sq->stats.bytes += num_bytes;
	sq->stats.xmit_more += skb->xmit_more;

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs) {
		if (skb_vlan_tag_present(skb)) {
			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
			ihs += VLAN_HLEN;
		} else {
			memcpy(eseg->inline_hdr.start, skb_data, ihs);
			mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
		}
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
	}

	headlen = skb_len - skb->data_len;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
	if (unlikely(num_dma < 0))
		goto dma_unmap_wqe_err;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
			     num_bytes, num_dma, wi, cseg);

	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

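/* ndo_start_xmit: pick the SQ bound to the skb's txq, fetch the next WQE slot
 * and hand the skb to mlx5e_sq_xmit(), after the optional IPsec TX handling
 * when CONFIG_MLX5_EN_IPSEC is enabled.
 */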
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

	memset(wqe, 0, sizeof(*wqe));

#ifdef CONFIG_MLX5_EN_IPSEC
	if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) {
		skb = mlx5e_ipsec_handle_tx_skb(dev, wqe, skb);
		if (unlikely(!skb))
			return NETDEV_TX_OK;
	}
#endif

	return mlx5e_sq_xmit(sq, skb, wqe, pi);
}

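/* NAPI TX completion: poll up to MLX5E_TX_CQ_POLL_BUDGET CQEs, unmap the DMA
 * of the completed WQEs, free their skbs and wake the txq if it was stopped.
 * Returns true when the budget was exhausted (more work may be pending).
 */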
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_txqsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	i = 0;
	do {
		u16 wqe_counter;
		bool last_wqe;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			struct mlx5e_tx_wqe_info *wi;
			struct sk_buff *skb;
			u16 ci;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = sqcc & sq->wq.sz_m1;
			wi = &sq->db.wqe_info[ci];
			skb = wi->skb;

			if (unlikely(!skb)) { /* nop */
				sqcc++;
				continue;
			}

			if (unlikely(skb_shinfo(skb)->tx_flags &
				     SKBTX_HW_TSTAMP)) {
				struct skb_shared_hwtstamps hwts = {};

				mlx5e_fill_hwstamp(sq->tstamp,
						   get_cqe_ts(cqe), &hwts);
				skb_tstamp_tx(skb, &hwts);
			}

			for (j = 0; j < wi->num_dma; j++) {
				struct mlx5e_sq_dma *dma =
					mlx5e_dma_get(sq, dma_fifo_cc++);

				mlx5e_tx_dma_unmap(sq->pdev, dma);
			}

			npkts++;
			nbytes += wi->num_bytes;
			sqcc += wi->num_wqebbs;
			napi_consume_skb(skb, napi_budget);
		} while (!last_wqe);

	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM)) {
		netif_tx_wake_queue(sq->txq);
		sq->stats.wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

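/* Release every WQE still outstanding on the SQ (cc != pc), unmapping its DMA
 * and freeing its skb, without going through the completion queue.
 */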
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	struct sk_buff *skb;
	u16 ci;
	int i;

	while (sq->cc != sq->pc) {
		ci = sq->cc & sq->wq.sz_m1;
		wi = &sq->db.wqe_info[ci];
		skb = wi->skb;

		if (!skb) { /* nop */
			sq->cc++;
			continue;
		}

		for (i = 0; i < wi->num_dma; i++) {
			struct mlx5e_sq_dma *dma =
				mlx5e_dma_get(sq, sq->dma_fifo_cc++);

			mlx5e_tx_dma_unmap(sq->pdev, dma);
		}

		dev_kfree_skb_any(skb);
		sq->cc += wi->num_wqebbs;
	}
}

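/* IPoIB TX path: the WQE additionally carries a datagram segment built from
 * the address vector (AV), destination QPN and Q_Key.
 */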
#ifdef CONFIG_MLX5_CORE_IPOIB

struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};

struct mlx5i_tx_wqe {
	struct mlx5_wqe_ctrl_seg     ctrl;
	struct mlx5_wqe_datagram_seg datagram;
	struct mlx5_wqe_eth_pad      pad;
	struct mlx5_wqe_eth_seg      eth;
};

static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
			   struct mlx5_wqe_datagram_seg *dseg)
{
	memcpy(&dseg->av, av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

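/* IPoIB xmit: same flow as mlx5e_sq_xmit(), plus the datagram segment and
 * without HW VLAN insertion.
 */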
netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			  struct mlx5_av *av, u32 dqpn, u32 dqkey)
{
	struct mlx5_wq_cyc       *wq  = &sq->wq;
	u16                       pi  = sq->pc & wq->sz_m1;
	struct mlx5i_tx_wqe      *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5e_tx_wqe_info *wi  = &sq->db.wqe_info[pi];

	struct mlx5_wqe_ctrl_seg     *cseg = &wqe->ctrl;
	struct mlx5_wqe_datagram_seg *datagram = &wqe->datagram;
	struct mlx5_wqe_eth_seg      *eseg = &wqe->eth;

	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;
	u8  opcode = MLX5_OPCODE_SEND;
	unsigned int num_bytes;
	int num_dma;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;

	memset(wqe, 0, sizeof(*wqe));

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	if (skb_is_gso(skb)) {
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
		sq->stats.packets += skb_shinfo(skb)->gso_segs;
	} else {
		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		sq->stats.packets++;
	}

	sq->stats.bytes += num_bytes;
	sq->stats.xmit_more += skb->xmit_more;

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs) {
		memcpy(eseg->inline_hdr.start, skb_data, ihs);
		mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
	}

	headlen = skb_len - skb->data_len;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
	if (unlikely(num_dma < 0))
		goto dma_unmap_wqe_err;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
			     num_bytes, num_dma, wi, cseg);

	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

#endif