drivers/net/ethernet/mellanox/mlx4/en_tx.c
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <asm/page.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/indirect_call_wrapper.h>
#include <net/ipv6.h>

#include "mlx4_en.h"

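/* Allocate and initialize one TX ring: the tx_info shadow array, the bounce
 * buffer used for descriptors that would wrap past the end of the ring, the
 * HW queue resources and QP, and (when available) a BlueFlame register.
 * Called once per TX queue.
 */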
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring, u32 size,
			   u16 stride, int node, int queue_index)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	int tmp;
	int err;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		en_err(priv, "Failed allocating TX ring\n");
		return -ENOMEM;
	}

	ring->size = size;
	ring->size_mask = size - 1;
	ring->sp_stride = stride;
	ring->full_size = ring->size - HEADROOM - MLX4_MAX_DESC_TXBBS;

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node);
	if (!ring->tx_info) {
		err = -ENOMEM;
		goto err_ring;
	}

	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	       ring->tx_info, tmp);

	ring->bounce_buf = kmalloc_node(MLX4_TX_BOUNCE_BUFFER_SIZE,
					GFP_KERNEL, node);
	if (!ring->bounce_buf) {
		ring->bounce_buf = kmalloc(MLX4_TX_BOUNCE_BUFFER_SIZE,
					   GFP_KERNEL);
		if (!ring->bounce_buf) {
			err = -ENOMEM;
			goto err_info;
		}
	}
	ring->buf_size = ALIGN(size * ring->sp_stride, MLX4_EN_PAGE_SIZE);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->persist->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

	ring->buf = ring->sp_wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
	       ring, ring->buf, ring->size, ring->buf_size,
	       (unsigned long long) ring->sp_wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
				    MLX4_RESERVE_ETH_BF_QP,
				    MLX4_RES_USAGE_DRIVER);
	if (err) {
		en_err(priv, "failed reserving qp for TX ring\n");
		goto err_hwq_res;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}
	ring->sp_qp.event = mlx4_en_sqp_event;

	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
		ring->bf_alloced = false;
		priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
	} else {
		ring->bf_alloced = true;
		ring->bf_enabled = !!(priv->pflags &
				      MLX4_EN_PRIV_FLAGS_BLUEFLAME);
	}
	ring->doorbell_address = ring->bf.uar->map + MLX4_SEND_DOORBELL;

	ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
	ring->queue_index = queue_index;

	if (queue_index < priv->num_tx_rings_p_up)
		cpumask_set_cpu(cpumask_local_spread(queue_index,
						     priv->mdev->dev->numa_node),
				&ring->sp_affinity_mask);

	*pring = ring;
	return 0;

err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
err_info:
	kvfree(ring->tx_info);
	ring->tx_info = NULL;
err_ring:
	kfree(ring);
	*pring = NULL;
	return err;
}

void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring **pring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring = *pring;
	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	if (ring->bf_alloced)
		mlx4_bf_free(mdev->dev, &ring->bf);
	mlx4_qp_remove(mdev->dev, &ring->sp_qp);
	mlx4_qp_free(mdev->dev, &ring->sp_qp);
	mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
	mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
	kvfree(ring->tx_info);
	ring->tx_info = NULL;
	kfree(ring);
	*pring = NULL;
}

int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq, int user_prio)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	ring->sp_cqn = cq;
	ring->prod = 0;
	ring->cons = 0xffffffff;
	ring->last_nr_txbb = 1;
	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
	memset(ring->buf, 0, ring->buf_size);
	ring->free_tx_desc = mlx4_en_free_tx_desc;

	ring->sp_qp_state = MLX4_QP_STATE_RST;
	ring->doorbell_qpn = cpu_to_be32(ring->sp_qp.qpn << 8);
	ring->mr_key = cpu_to_be32(mdev->mr.key);

	mlx4_en_fill_qp_context(priv, ring->size, ring->sp_stride, 1, 0, ring->qpn,
				ring->sp_cqn, user_prio, &ring->sp_context);
	if (ring->bf_alloced)
		ring->sp_context.usr_page =
			cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
							 ring->bf.uar->index));

	err = mlx4_qp_to_ready(mdev->dev, &ring->sp_wqres.mtt, &ring->sp_context,
			       &ring->sp_qp, &ring->sp_qp_state);
	if (!cpumask_empty(&ring->sp_affinity_mask))
		netif_set_xps_queue(priv->dev, &ring->sp_affinity_mask,
				    ring->queue_index);

	return err;
}

void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_qp_modify(mdev->dev, NULL, ring->sp_qp_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->sp_qp);
}

static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
{
	return ring->prod - ring->cons > ring->full_size;
}

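/* Overwrite the TXBBs of a completed descriptor with the stamp pattern for
 * the given ownership phase; the stamp value is flipped whenever the walk
 * wraps past the end of the ring buffer.
 */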
static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
			      struct mlx4_en_tx_ring *ring, int index,
			      u8 owner)
{
	__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
	struct mlx4_en_tx_desc *tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	void *end = ring->buf + ring->buf_size;
	__be32 *ptr = (__be32 *)tx_desc;
	int i;

	/* Optimize the common case when there are no wraparounds */
	if (likely((void *)tx_desc +
		   (tx_info->nr_txbb << LOG_TXBB_SIZE) <= end)) {
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb << LOG_TXBB_SIZE;
		     i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
		}
	} else {
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb << LOG_TXBB_SIZE;
		     i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
			if ((void *)ptr >= end) {
				ptr = ring->buf;
				stamp ^= cpu_to_be32(0x80000000);
			}
		}
	}
}

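/* Release one completed descriptor on the regular (skb) TX path: report a HW
 * timestamp if one was requested, unmap the linear part and all fragments
 * (handling descriptors that wrap around the end of the ring), and hand the
 * skb back to the stack with napi_consume_skb().
 */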
INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
						   struct mlx4_en_tx_ring *ring,
						   int index, u64 timestamp,
						   int napi_mode));

u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
			 struct mlx4_en_tx_ring *ring,
			 int index, u64 timestamp,
			 int napi_mode)
{
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
	void *end = ring->buf + ring->buf_size;
	struct sk_buff *skb = tx_info->skb;
	int nr_maps = tx_info->nr_maps;
	int i;

	/* We do not touch skb here, so prefetch skb->users location
	 * to speedup consume_skb()
	 */
	prefetchw(&skb->users);

	if (unlikely(timestamp)) {
		struct skb_shared_hwtstamps hwts;

		mlx4_en_fill_hwtstamps(priv->mdev, &hwts, timestamp);
		skb_tstamp_tx(skb, &hwts);
	}

	if (!tx_info->inl) {
		if (tx_info->linear)
			dma_unmap_single(priv->ddev,
					 tx_info->map0_dma,
					 tx_info->map0_byte_count,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(priv->ddev,
				       tx_info->map0_dma,
				       tx_info->map0_byte_count,
				       DMA_TO_DEVICE);
		/* Optimize the common case when there are no wraparounds */
		if (likely((void *)tx_desc +
			   (tx_info->nr_txbb << LOG_TXBB_SIZE) <= end)) {
			for (i = 1; i < nr_maps; i++) {
				data++;
				dma_unmap_page(priv->ddev,
					(dma_addr_t)be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					DMA_TO_DEVICE);
			}
		} else {
			if ((void *)data >= end)
				data = ring->buf + ((void *)data - end);

			for (i = 1; i < nr_maps; i++) {
				data++;
				/* Check for wraparound before unmapping */
				if ((void *) data >= end)
					data = ring->buf;
				dma_unmap_page(priv->ddev,
					(dma_addr_t)be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					DMA_TO_DEVICE);
			}
		}
	}
	napi_consume_skb(skb, napi_mode);

	return tx_info->nr_txbb;
}

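/* Release one completed descriptor on the XDP TX path: there is no skb, only
 * a page, so try to put the page back into the RX ring's recycle cache and
 * fall back to unmapping and freeing it when recycling is not possible.
 */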
INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
						      struct mlx4_en_tx_ring *ring,
						      int index, u64 timestamp,
						      int napi_mode));

u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
			    struct mlx4_en_tx_ring *ring,
			    int index, u64 timestamp,
			    int napi_mode)
{
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_rx_alloc frame = {
		.page = tx_info->page,
		.dma = tx_info->map0_dma,
	};

	if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
		dma_unmap_page(priv->ddev, tx_info->map0_dma,
			       PAGE_SIZE, priv->dma_dir);
		put_page(tx_info->page);
	}

	return tx_info->nr_txbb;
}

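/* Drain a TX ring outside the normal completion path (e.g. when the port is
 * brought down): walk from consumer to producer, free every outstanding
 * descriptor and reset the BQL queue state. Returns the number of freed
 * descriptors.
 */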
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
	       ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = ring->free_tx_desc(priv, ring,
						ring->cons & ring->size_mask,
						0, 0 /* Non-NAPI caller */);
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (ring->tx_queue)
		netdev_tx_reset_queue(ring->tx_queue);

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}

static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe,
				   u16 cqe_index, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_info *tx_info;
	struct mlx4_en_tx_desc *tx_desc;
	u16 wqe_index;
	int desc_size;

	en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n",
	       ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome);
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe),
		       false);

	wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask;
	tx_info = &ring->tx_info[wqe_index];
	desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE;
	en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn,
	       wqe_index, desc_size);
	tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE);
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false);

	if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
		return;

	en_err(priv, "Scheduling port restart\n");
	queue_work(mdev->workqueue, &priv->restart_task);
}

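/* Process up to the TX work limit of completions on one CQ: walk the
 * completed CQEs, free or recycle the corresponding descriptors, stamp the
 * reclaimed TXBBs, then update the CQ consumer index before publishing the
 * new ring state. ring->last_nr_txbb and ring->cons are read once at the
 * top and written back once at the end with READ_ONCE()/WRITE_ONCE(): the
 * cache line is dirtied only once per poll, and ring->cons is also read
 * locklessly by the xmit path via mlx4_en_is_tx_ring_full().
 */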
int mlx4_en_process_tx_cq(struct net_device *dev,
			  struct mlx4_en_cq *cq, int napi_budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring];
	struct mlx4_cqe *cqe;
	u16 index, ring_index, stamp_index;
	u32 txbbs_skipped = 0;
	u32 txbbs_stamp = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;
	u32 packets = 0;
	u32 bytes = 0;
	int factor = priv->cqe_factor;
	int done = 0;
	int budget = priv->tx_work_limit;
	u32 last_nr_txbb;
	u32 ring_cons;

	if (unlikely(!priv->port_up))
		return 0;

	netdev_txq_bql_complete_prefetchw(ring->tx_queue);

	index = cons_index & size_mask;
	cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
	last_nr_txbb = READ_ONCE(ring->last_nr_txbb);
	ring_cons = READ_ONCE(ring->cons);
	ring_index = ring_cons & size_mask;
	stamp_index = ring_index;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cons_index & size) && (done < budget)) {
		u16 new_index;

		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		dma_rmb();

		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR))
			if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state))
				mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index,
						       ring);

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			u64 timestamp = 0;

			txbbs_skipped += last_nr_txbb;
			ring_index = (ring_index + last_nr_txbb) & size_mask;

			if (unlikely(ring->tx_info[ring_index].ts_requested))
				timestamp = mlx4_en_get_cqe_ts(cqe);

			/* free next descriptor */
			last_nr_txbb = INDIRECT_CALL_2(ring->free_tx_desc,
						       mlx4_en_free_tx_desc,
						       mlx4_en_recycle_tx_desc,
						       priv, ring, ring_index,
						       timestamp, napi_budget);

			mlx4_en_stamp_wqe(priv, ring, stamp_index,
					  !!((ring_cons + txbbs_stamp) &
					     ring->size));
			stamp_index = ring_index;
			txbbs_stamp = txbbs_skipped;
			packets++;
			bytes += ring->tx_info[ring_index].nr_bytes;
		} while ((++done < budget) && (ring_index != new_index));

		++cons_index;
		index = cons_index & size_mask;
		cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
	}

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();

	/* we want to dirty this cache line once */
	WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb);
	WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);

	if (cq->type == TX_XDP)
		return done;

	netdev_tx_completed_queue(ring->tx_queue, packets, bytes);

	/* Wake up the Tx queue if it was stopped and the ring is no longer
	 * full.
	 */
	if (netif_tx_queue_stopped(ring->tx_queue) &&
	    !mlx4_en_is_tx_ring_full(ring)) {
		netif_tx_wake_queue(ring->tx_queue);
		ring->wake_queue++;
	}

	return done;
}

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* TX CQ polling - called by NAPI */
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int work_done;

	work_done = mlx4_en_process_tx_cq(dev, cq, budget);
	if (work_done >= budget)
		return budget;

	if (napi_complete_done(napi, work_done))
		mlx4_en_arm_cq(priv, cq);

	return 0;
}

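/* A descriptor that would wrap past the end of the ring is first built in
 * ring->bounce_buf. This helper copies it back into its real slot: the part
 * that wrapped to the start of the buffer first, then the rest, copying
 * backwards and skipping the first control dword, which the caller writes
 * last when it hands ownership to the HW.
 */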
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
						      struct mlx4_en_tx_ring *ring,
						      u32 index,
						      unsigned int desc_size)
{
	u32 copy = (ring->size - index) << LOG_TXBB_SIZE;
	int i;

	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	for (i = copy - 4; i >= 4 ; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *)(ring->buf + (index << LOG_TXBB_SIZE) + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return real descriptor location */
	return ring->buf + (index << LOG_TXBB_SIZE);
}

/* Decide if skb can be inlined in tx descriptor to avoid dma mapping
 *
 * It seems strange we do not simply use skb_copy_bits().
 * This would allow inlining all skbs iff skb->len <= inline_thold
 *
 * Note that caller already checked skb was not a gso packet
 */
static bool is_inline(int inline_thold, const struct sk_buff *skb,
		      const struct skb_shared_info *shinfo,
		      void **pfrag)
{
	void *ptr;

	if (skb->len > inline_thold || !inline_thold)
		return false;

	if (shinfo->nr_frags == 1) {
		ptr = skb_frag_address_safe(&shinfo->frags[0]);
		if (unlikely(!ptr))
			return false;
		*pfrag = ptr;
		return true;
	}
	if (shinfo->nr_frags)
		return false;
	return true;
}

static int inline_size(const struct sk_buff *skb)
{
	if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
	    <= MLX4_INLINE_ALIGN)
		return ALIGN(skb->len + CTRL_SIZE +
			     sizeof(struct mlx4_wqe_inline_seg), 16);
	else
		return ALIGN(skb->len + CTRL_SIZE + 2 *
			     sizeof(struct mlx4_wqe_inline_seg), 16);
}

static int get_real_size(const struct sk_buff *skb,
			 const struct skb_shared_info *shinfo,
			 struct net_device *dev,
			 int *lso_header_size,
			 bool *inline_ok,
			 void **pfrag,
			 int *hopbyhop)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int real_size;

	if (shinfo->gso_size) {
		*inline_ok = false;
		*hopbyhop = 0;
		if (skb->encapsulation) {
			*lso_header_size = skb_inner_tcp_all_headers(skb);
		} else {
			/* Detects large IPV6 TCP packets and prepares for removal of
			 * HBH header that has been pushed by ip6_xmit(),
			 * mainly so that tcpdump can dissect them.
			 */
			if (ipv6_has_hopopt_jumbo(skb))
				*hopbyhop = sizeof(struct hop_jumbo_hdr);
			*lso_header_size = skb_tcp_all_headers(skb);
		}
		real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
			ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE);
		if (unlikely(*lso_header_size != skb_headlen(skb))) {
			/* We add a segment for the skb linear buffer only if
			 * it contains data */
			if (*lso_header_size < skb_headlen(skb))
				real_size += DS_SIZE;
			else {
				if (netif_msg_tx_err(priv))
					en_warn(priv, "Non-linear headers\n");
				return 0;
			}
		}
	} else {
		*lso_header_size = 0;
		*inline_ok = is_inline(priv->prof->inline_thold, skb,
				       shinfo, pfrag);

		if (*inline_ok)
			real_size = inline_size(skb);
		else
			real_size = CTRL_SIZE +
				    (shinfo->nr_frags + 1) * DS_SIZE;
	}

	return real_size;
}

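/* Copy a small packet directly into the descriptor's inline segment(s),
 * padding up to MIN_PKT_LEN when needed. If the data does not fit in the
 * space left after the control segment it is split across two inline
 * segments; a dma_wmb() makes sure the copied data is visible before the
 * second segment's byte count is set.
 */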
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
			     const struct sk_buff *skb,
			     const struct skb_shared_info *shinfo,
			     void *fragptr)
{
	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof(*inl);
	unsigned int hlen = skb_headlen(skb);

	if (skb->len <= spc) {
		if (likely(skb->len >= MIN_PKT_LEN)) {
			inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
		} else {
			inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
			memset(inl->data + skb->len, 0,
			       MIN_PKT_LEN - skb->len);
		}
		skb_copy_from_linear_data(skb, inl->data, hlen);
		if (shinfo->nr_frags)
			memcpy(inl->data + hlen, fragptr,
			       skb_frag_size(&shinfo->frags[0]));

	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		if (hlen <= spc) {
			skb_copy_from_linear_data(skb, inl->data, hlen);
			if (hlen < spc) {
				memcpy(inl->data + hlen,
				       fragptr, spc - hlen);
				fragptr += spc - hlen;
			}
			inl = (void *)inl->data + spc;
			memcpy(inl->data, fragptr, skb->len - spc);
		} else {
			skb_copy_from_linear_data(skb, inl->data, spc);
			inl = (void *)inl->data + spc;
			skb_copy_from_linear_data_offset(skb, spc, inl->data,
							 hlen - spc);
			if (shinfo->nr_frags)
				memcpy(inl->data + hlen - spc,
				       fragptr,
				       skb_frag_size(&shinfo->frags[0]));
		}

		dma_wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
	}
}

u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
			 struct net_device *sb_dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u16 rings_p_up = priv->num_tx_rings_p_up;

	if (netdev_get_num_tc(dev))
		return netdev_pick_tx(dev, skb, NULL);

	return netdev_pick_tx(dev, skb, NULL) % rings_p_up;
}

static void mlx4_bf_copy(void __iomem *dst, const void *src,
			 unsigned int bytecnt)
{
	__iowrite64_copy(dst, src, bytecnt / 8);
}

void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring)
{
	wmb();
	/* Since there is no iowrite*_native() that writes the
	 * value as is, without byteswapping - using the one
	 * that doesn't do byteswapping in the relevant arch
	 * endianness.
	 */
#if defined(__LITTLE_ENDIAN)
	iowrite32(
#else
	iowrite32be(
#endif
		(__force u32)ring->doorbell_qpn, ring->doorbell_address);
}

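/* Publish a finished descriptor to the HW. In the BlueFlame case the whole
 * descriptor is copied through the BlueFlame register after the ownership
 * word has been set; otherwise only the ownership/opcode word is written and
 * the doorbell is rung (or deferred when more packets are pending).
 */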
static void mlx4_en_tx_write_desc(struct mlx4_en_tx_ring *ring,
				  struct mlx4_en_tx_desc *tx_desc,
				  union mlx4_wqe_qpn_vlan qpn_vlan,
				  int desc_size, int bf_index,
				  __be32 op_own, bool bf_ok,
				  bool send_doorbell)
{
	tx_desc->ctrl.qpn_vlan = qpn_vlan;

	if (bf_ok) {
		op_own |= htonl((bf_index & 0xffff) << 8);
		/* Ensure new descriptor hits memory
		 * before setting ownership of this descriptor to HW
		 */
		dma_wmb();
		tx_desc->ctrl.owner_opcode = op_own;

		wmb();

		mlx4_bf_copy(ring->bf.reg + ring->bf.offset, &tx_desc->ctrl,
			     desc_size);

		wmb();

		ring->bf.offset ^= ring->bf.buf_size;
	} else {
		/* Ensure new descriptor hits memory
		 * before setting ownership of this descriptor to HW
		 */
		dma_wmb();
		tx_desc->ctrl.owner_opcode = op_own;
		if (send_doorbell)
			mlx4_en_xmit_doorbell(ring);
		else
			ring->xmit_more++;
	}
}

static bool mlx4_en_build_dma_wqe(struct mlx4_en_priv *priv,
				  struct skb_shared_info *shinfo,
				  struct mlx4_wqe_data_seg *data,
				  struct sk_buff *skb,
				  int lso_header_size,
				  __be32 mr_key,
				  struct mlx4_en_tx_info *tx_info)
{
	struct device *ddev = priv->ddev;
	dma_addr_t dma = 0;
	u32 byte_count = 0;
	int i_frag;

	/* Map fragments if any */
	for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) {
		const skb_frag_t *frag = &shinfo->frags[i_frag];
		byte_count = skb_frag_size(frag);
		dma = skb_frag_dma_map(ddev, frag,
				       0, byte_count,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(ddev, dma))
			goto tx_drop_unmap;

		data->addr = cpu_to_be64(dma);
		data->lkey = mr_key;
		dma_wmb();
		data->byte_count = cpu_to_be32(byte_count);
		--data;
	}

	/* Map linear part if needed */
	if (tx_info->linear) {
		byte_count = skb_headlen(skb) - lso_header_size;

		dma = dma_map_single(ddev, skb->data +
				     lso_header_size, byte_count,
				     DMA_TO_DEVICE);
		if (dma_mapping_error(ddev, dma))
			goto tx_drop_unmap;

		data->addr = cpu_to_be64(dma);
		data->lkey = mr_key;
		dma_wmb();
		data->byte_count = cpu_to_be32(byte_count);
	}
	/* tx completion can avoid cache line miss for common cases */
	tx_info->map0_dma = dma;
	tx_info->map0_byte_count = byte_count;

	return true;

tx_drop_unmap:
	en_err(priv, "DMA mapping error\n");

	while (++i_frag < shinfo->nr_frags) {
		++data;
		dma_unmap_page(ddev, (dma_addr_t)be64_to_cpu(data->addr),
			       be32_to_cpu(data->byte_count),
			       DMA_TO_DEVICE);
	}

	return false;
}

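/* Main transmit entry point for the skb path: compute the descriptor size
 * (inline, regular or LSO), grab the next slot (falling back to the bounce
 * buffer when the descriptor would wrap), build the control, inline/LSO and
 * data segments, update BQL, and finally hand the descriptor to the HW via
 * BlueFlame or a doorbell write.
 */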
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct mlx4_en_priv *priv = netdev_priv(dev);
	union mlx4_wqe_qpn_vlan qpn_vlan = {};
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_wqe_data_seg *data;
	struct mlx4_en_tx_info *tx_info;
	u32 __maybe_unused ring_cons;
	int tx_ind;
	int nr_txbb;
	int desc_size;
	int real_size;
	u32 index, bf_index;
	struct ipv6hdr *h6;
	__be32 op_own;
	int lso_header_size;
	void *fragptr = NULL;
	bool bounce = false;
	bool send_doorbell;
	bool stop_queue;
	bool inline_ok;
	u8 data_offset;
	int hopbyhop;
	bool bf_ok;

	tx_ind = skb_get_queue_mapping(skb);
	ring = priv->tx_ring[TX][tx_ind];

	if (unlikely(!priv->port_up))
		goto tx_drop;

	real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
				  &inline_ok, &fragptr, &hopbyhop);
	if (unlikely(!real_size))
		goto tx_drop_count;

	/* Align descriptor to TXBB size */
	desc_size = ALIGN(real_size, TXBB_SIZE);
	nr_txbb = desc_size >> LOG_TXBB_SIZE;

	bf_ok = ring->bf_enabled;
	if (skb_vlan_tag_present(skb)) {
		u16 vlan_proto;

		qpn_vlan.vlan_tag = cpu_to_be16(skb_vlan_tag_get(skb));
		vlan_proto = be16_to_cpu(skb->vlan_proto);
		if (vlan_proto == ETH_P_8021AD)
			qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
		else if (vlan_proto == ETH_P_8021Q)
			qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
		else
			qpn_vlan.ins_vlan = 0;
		bf_ok = false;
	}

	netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);

	/* Packet is good - grab an index and transmit it */
	index = ring->prod & ring->size_mask;
	bf_index = ring->prod;

	/* See if we have enough space for whole descriptor TXBB for setting
	 * SW ownership on next descriptor; if not, use a bounce buffer. */
	if (likely(index + nr_txbb <= ring->size))
		tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
	else {
		if (unlikely(nr_txbb > MLX4_MAX_DESC_TXBBS)) {
			if (netif_msg_tx_err(priv))
				en_warn(priv, "Oversized header or SG list\n");
			goto tx_drop_count;
		}
		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
		bounce = true;
		bf_ok = false;
	}

	/* Save skb in tx_info ring */
	tx_info = &ring->tx_info[index];
	tx_info->skb = skb;
	tx_info->nr_txbb = nr_txbb;

	if (!lso_header_size) {
		data = &tx_desc->data;
		data_offset = offsetof(struct mlx4_en_tx_desc, data);
	} else {
		int lso_align = ALIGN(lso_header_size - hopbyhop + 4, DS_SIZE);

		data = (void *)&tx_desc->lso + lso_align;
		data_offset = offsetof(struct mlx4_en_tx_desc, lso) + lso_align;
	}

	/* valid only for non-inline segments */
	tx_info->data_offset = data_offset;

	tx_info->inl = inline_ok;

	tx_info->linear = lso_header_size < skb_headlen(skb) && !inline_ok;

	tx_info->nr_maps = shinfo->nr_frags + tx_info->linear;
	data += tx_info->nr_maps - 1;

	if (!tx_info->inl)
		if (!mlx4_en_build_dma_wqe(priv, shinfo, data, skb,
					   lso_header_size, ring->mr_key,
					   tx_info))
			goto tx_drop_count;

	/*
	 * For timestamping add flag to skb_shinfo and
	 * set flag for further reference
	 */
	tx_info->ts_requested = 0;
	if (unlikely(ring->hwtstamp_tx_type == HWTSTAMP_TX_ON &&
		     shinfo->tx_flags & SKBTX_HW_TSTAMP)) {
		shinfo->tx_flags |= SKBTX_IN_PROGRESS;
		tx_info->ts_requested = 1;
	}

	/* Prepare the ctrl segment, apart from opcode+ownership which depends
	 * on whether LSO is used */
	tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		if (!skb->encapsulation)
			tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
								 MLX4_WQE_CTRL_TCP_UDP_CSUM);
		else
			tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
		ring->tx_csum++;
	}

	if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) {
		struct ethhdr *ethh;

		/* Copy dst mac address to wqe. This allows loopback in eSwitch,
		 * so that VFs and PF can communicate with each other
		 */
		ethh = (struct ethhdr *)skb->data;
		tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
		tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
	}

	/* Handle LSO (TSO) packets */
	if (lso_header_size) {
		int i;

		/* Mark opcode as LSO */
		op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);

		lso_header_size -= hopbyhop;
		/* Fill in the LSO prefix */
		tx_desc->lso.mss_hdr_size = cpu_to_be32(
			shinfo->gso_size << 16 | lso_header_size);

		if (unlikely(hopbyhop)) {
			/* remove the HBH header.
			 * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
			 */
			memcpy(tx_desc->lso.header, skb->data, ETH_HLEN + sizeof(*h6));
			h6 = (struct ipv6hdr *)((char *)tx_desc->lso.header + ETH_HLEN);
			h6->nexthdr = IPPROTO_TCP;
			/* Copy the TCP header after the IPv6 one */
			memcpy(h6 + 1,
			       skb->data + ETH_HLEN + sizeof(*h6) +
					sizeof(struct hop_jumbo_hdr),
			       tcp_hdrlen(skb));
			/* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
		} else {
			/* Copy headers;
			 * note that we already verified that it is linear
			 */
			memcpy(tx_desc->lso.header, skb->data, lso_header_size);
		}
		ring->tso_packets++;

		i = shinfo->gso_segs;
		tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
		ring->packets += i;
	} else {
		/* Normal (Non LSO) packet */
		op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
		tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		ring->packets++;
	}
	ring->bytes += tx_info->nr_bytes;

	if (tx_info->inl)
		build_inline_wqe(tx_desc, skb, shinfo, fragptr);

	if (skb->encapsulation) {
		union {
			struct iphdr *v4;
			struct ipv6hdr *v6;
			unsigned char *hdr;
		} ip;
		u8 proto;

		ip.hdr = skb_inner_network_header(skb);
		proto = (ip.v4->version == 4) ? ip.v4->protocol :
						ip.v6->nexthdr;

		if (proto == IPPROTO_TCP || proto == IPPROTO_UDP)
			op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP);
		else
			op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
	}

	ring->prod += nr_txbb;

	/* If we used a bounce buffer then copy descriptor back into place */
	if (unlikely(bounce))
		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);

	skb_tx_timestamp(skb);

	/* Check available TXBBs and 2K spare for prefetch */
	stop_queue = mlx4_en_is_tx_ring_full(ring);
	if (unlikely(stop_queue)) {
		netif_tx_stop_queue(ring->tx_queue);
		ring->queue_stopped++;
	}

	send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
					       tx_info->nr_bytes,
					       netdev_xmit_more());

	real_size = (real_size / 16) & 0x3f;

	bf_ok &= desc_size <= MAX_BF && send_doorbell;

	if (bf_ok)
		qpn_vlan.bf_qpn = ring->doorbell_qpn | cpu_to_be32(real_size);
	else
		qpn_vlan.fence_size = real_size;

	mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, desc_size, bf_index,
			      op_own, bf_ok, send_doorbell);

	if (unlikely(stop_queue)) {
		/* If the queue was emptied after the if (stop_queue) check and
		 * before the netif_tx_stop_queue(), we need to wake the queue,
		 * or else it will remain stopped forever.
		 * A memory barrier is needed to make sure ring->cons was not
		 * updated before the queue was stopped.
		 */
		smp_rmb();

		if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
			netif_tx_wake_queue(ring->tx_queue);
			ring->wake_queue++;
		}
	}
	return NETDEV_TX_OK;

tx_drop_count:
	ring->tx_dropped++;
tx_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

#define MLX4_EN_XDP_TX_NRTXBB  1
#define MLX4_EN_XDP_TX_REAL_SZ (((CTRL_SIZE + MLX4_EN_XDP_TX_NRTXBB * DS_SIZE) \
				 / 16) & 0x3f)

void mlx4_en_init_tx_xdp_ring_descs(struct mlx4_en_priv *priv,
				    struct mlx4_en_tx_ring *ring)
{
	int i;

	for (i = 0; i < ring->size; i++) {
		struct mlx4_en_tx_info *tx_info = &ring->tx_info[i];
		struct mlx4_en_tx_desc *tx_desc = ring->buf +
			(i << LOG_TXBB_SIZE);

		tx_info->map0_byte_count = PAGE_SIZE;
		tx_info->nr_txbb = MLX4_EN_XDP_TX_NRTXBB;
		tx_info->data_offset = offsetof(struct mlx4_en_tx_desc, data);
		tx_info->ts_requested = 0;
		tx_info->nr_maps = 1;
		tx_info->linear = 1;
		tx_info->inl = 0;

		tx_desc->data.lkey = ring->mr_key;
		tx_desc->ctrl.qpn_vlan.fence_size = MLX4_EN_XDP_TX_REAL_SZ;
		tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
	}
}

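/* XDP_TX transmit path: the descriptor layout was pre-initialized by
 * mlx4_en_init_tx_xdp_ring_descs(), so only the page address, length and
 * ownership word need to be filled in here. The doorbell is not rung per
 * frame; the caller batches it via *doorbell_pending.
 */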
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
			       struct mlx4_en_rx_alloc *frame,
			       struct mlx4_en_priv *priv, unsigned int length,
			       int tx_ind, bool *doorbell_pending)
{
	struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_en_tx_info *tx_info;
	struct mlx4_wqe_data_seg *data;
	struct mlx4_en_tx_ring *ring;
	dma_addr_t dma;
	__be32 op_own;
	int index;

	if (unlikely(!priv->port_up))
		goto tx_drop;

	ring = priv->tx_ring[TX_XDP][tx_ind];

	if (unlikely(mlx4_en_is_tx_ring_full(ring)))
		goto tx_drop_count;

	index = ring->prod & ring->size_mask;
	tx_info = &ring->tx_info[index];

	tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
	data = &tx_desc->data;

	dma = frame->dma;

	tx_info->page = frame->page;
	frame->page = NULL;
	tx_info->map0_dma = dma;
	tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN);

	dma_sync_single_range_for_device(priv->ddev, dma, frame->page_offset,
					 length, DMA_TO_DEVICE);

	data->addr = cpu_to_be64(dma + frame->page_offset);
	dma_wmb();
	data->byte_count = cpu_to_be32(length);

	/* tx completion can avoid cache line miss for common cases */

	op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
		 ((ring->prod & ring->size) ?
		  cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);

	rx_ring->xdp_tx++;

	ring->prod += MLX4_EN_XDP_TX_NRTXBB;

	/* Ensure new descriptor hits memory
	 * before setting ownership of this descriptor to HW
	 */
	dma_wmb();
	tx_desc->ctrl.owner_opcode = op_own;
	ring->xmit_more++;

	*doorbell_pending = true;

	return NETDEV_TX_OK;

tx_drop_count:
	rx_ring->xdp_tx_full++;
	*doorbell_pending = true;
tx_drop:
	return NETDEV_TX_BUSY;
}
1235}