drivers/net/ethernet/pensando/ionic/ionic_txrx.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info, void *cb_arg);

static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell);

	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);

	DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q));
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
					  unsigned int len, bool frags)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	netdev = lif->netdev;
	stats = q_to_rx_stats(q);

	if (frags)
		skb = napi_get_frags(&q_to_qcq(q)->napi);
	else
		skb = netdev_alloc_skb_ip_align(netdev, len);

	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	return skb;
}

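/* Build a frag-based skb for a completed receive: unmap each page used by
 * the descriptor and attach it to the skb as a frag, consuming the pages so
 * that ionic_rx_fill() will allocate replacements.
 */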
static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	prefetch(page_address(page_info->page) + NET_IP_ALIGN);

	skb = ionic_rx_skb_alloc(q, len, true);
	if (unlikely(!skb))
		return NULL;

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!page_info->page)) {
			struct napi_struct *napi = &q_to_qcq(q)->napi;

			napi->skb = NULL;
			dev_kfree_skb(skb);
			return NULL;
		}

		frag_len = min(len, (u16)PAGE_SIZE);
		len -= frag_len;

		dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
			       PAGE_SIZE, DMA_FROM_DEVICE);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page_info->page, 0, frag_len, PAGE_SIZE);
		page_info->page = NULL;
		page_info++;
		i--;
	} while (i > 0);

	return skb;
}

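/* Copybreak path for small packets: copy the data into a freshly allocated
 * linear skb and leave the page mapped so it can be recycled by
 * ionic_rx_fill().
 */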
static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	skb = ionic_rx_skb_alloc(q, len, false);
	if (unlikely(!skb))
		return NULL;

	if (unlikely(!page_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(page_info->page), len);
	dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr),
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}

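/* Per-completion receive processing: drop the packet on error or queue reset,
 * build the skb via the copybreak or frag path, fill in RSS hash, checksum
 * and VLAN metadata from the completion, then hand the skb to GRO.
 */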
static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info, void *cb_arg)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	stats = q_to_rx_stats(q);
	netdev = q->lif->netdev;

	if (comp->status)
		return;

	/* no packet processing while resetting */
	if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state)))
		return;

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, cq_info);
	else
		skb = ionic_rx_frags(q, desc_info, cq_info);

	if (unlikely(!skb))
		return;

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM)) {
		if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__wsum)le16_to_cpu(comp->csum);
			stats->csum_complete++;
		}
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(comp->vlan_tci));
	}

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}

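/* Service one rx completion if its color phase matches the expected done
 * color and it refers to the current queue tail; each completion retires
 * exactly one queue entry.
 */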
static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail->index == q->head->index)
		return false;

	desc_info = q->tail;
	if (desc_info->index != le16_to_cpu(comp->comp_index))
		return false;

	q->tail = desc_info->next;

	/* clean the related q entry, only one per qc completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

static u32 ionic_rx_walk_cq(struct ionic_cq *rxcq, u32 limit)
{
	u32 work_done = 0;

	while (ionic_rx_service(rxcq, rxcq->tail)) {
		if (rxcq->tail->last)
			rxcq->done_color = !rxcq->done_color;
		rxcq->tail = rxcq->tail->next;
		DEBUG_STATS_CQE_CNT(rxcq);

		if (++work_done >= limit)
			break;
	}

	return work_done;
}

void ionic_rx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_rx_walk_cq(cq, cq->num_descs);

	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

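/* Allocate and DMA-map a full page for use as a receive buffer. */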
static struct page *ionic_rx_page_alloc(struct ionic_queue *q,
					dma_addr_t *dma_addr)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct device *dev;
	struct page *page;

	netdev = lif->netdev;
	dev = lif->ionic->dev;
	stats = q_to_rx_stats(q);
	page = alloc_page(GFP_ATOMIC);
	if (unlikely(!page)) {
		net_err_ratelimited("%s: Page alloc failed on %s!\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	*dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, *dma_addr))) {
		__free_page(page);
		net_err_ratelimited("%s: DMA single map failed on %s!\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return NULL;
	}

	return page;
}

static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
			       dma_addr_t dma_addr)
{
	struct ionic_lif *lif = q->lif;
	struct net_device *netdev;
	struct device *dev;

	netdev = lif->netdev;
	dev = lif->ionic->dev;

	if (unlikely(!page)) {
		net_err_ratelimited("%s: Trying to free unallocated buffer on %s!\n",
				    netdev->name, q->name);
		return;
	}

	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

	__free_page(page);
}

#define IONIC_RX_RING_DOORBELL_STRIDE		((1 << 5) - 1)
#define IONIC_RX_RING_HEAD_BUF_SZ		2048

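/* Post receive descriptors for every available queue slot.  Buffers that
 * still hold a page are recycled as-is; otherwise a page is allocated and
 * mapped for the main descriptor and for each sg element needed to cover
 * the MTU.  The doorbell is rung once every (IONIC_RX_RING_DOORBELL_STRIDE
 * + 1) postings rather than on every descriptor.
 */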
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_page_info *page_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_rxq_desc *desc;
	unsigned int nfrags;
	bool ring_doorbell;
	unsigned int i, j;
	unsigned int len;

	len = netdev->mtu + ETH_HLEN;
	nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;

	for (i = ionic_q_space_avail(q); i; i--) {
		desc_info = q->head;
		desc = desc_info->desc;
		sg_desc = desc_info->sg_desc;
		page_info = &desc_info->pages[0];

		if (page_info->page) { /* recycle the buffer */
			ring_doorbell = ((q->head->index + 1) &
					 IONIC_RX_RING_DOORBELL_STRIDE) == 0;
			ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
			continue;
		}

		/* fill main descriptor - pages[0] */
		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->npages = nfrags;
		page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
		if (unlikely(!page_info->page)) {
			desc->addr = 0;
			desc->len = 0;
			return;
		}
		desc->addr = cpu_to_le64(page_info->dma_addr);
		desc->len = cpu_to_le16(PAGE_SIZE);
		page_info++;

		/* fill sg descriptors - pages[1..n] */
		for (j = 0; j < nfrags - 1; j++) {
			if (page_info->page) /* recycle the sg buffer */
				continue;

			sg_elem = &sg_desc->elems[j];
			page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
			if (unlikely(!page_info->page)) {
				sg_elem->addr = 0;
				sg_elem->len = 0;
				return;
			}
			sg_elem->addr = cpu_to_le64(page_info->dma_addr);
			sg_elem->len = cpu_to_le16(PAGE_SIZE);
			page_info++;
		}

		ring_doorbell = ((q->head->index + 1) &
				 IONIC_RX_RING_DOORBELL_STRIDE) == 0;
		ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
	}
}

static void ionic_rx_fill_cb(void *arg)
{
	ionic_rx_fill(arg);
}

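/* Release any pages still attached to unprocessed receive descriptors. */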
void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *cur;
	struct ionic_rxq_desc *desc;
	unsigned int i;

	for (cur = q->tail; cur != q->head; cur = cur->next) {
		desc = cur->desc;
		desc->addr = 0;
		desc->len = 0;

		for (i = 0; i < cur->npages; i++) {
			if (likely(cur->pages[i].page)) {
				ionic_rx_page_free(q, cur->pages[i].page,
						   cur->pages[i].dma_addr);
				cur->pages[i].page = NULL;
				cur->pages[i].dma_addr = 0;
			}
		}

		cur->cb_arg = NULL;
	}
}

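/* NAPI poll handler: service the paired tx completion queue, process up to
 * budget rx completions, refill the rx ring, and return interrupt credits,
 * unmasking the interrupt when the poll completes under budget.
 */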
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	u32 work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txcq = &lif->txqcqs[qi].qcq->cq;

	ionic_tx_flush(txcq);

	work_done = ionic_rx_walk_cq(rxcq, budget);

	if (work_done)
		ionic_rx_fill_cb(rxcq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		DEBUG_STATS_INTR_REARM(rxcq->bound_intr);
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
	}
	return dma_addr;
}

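/* Completion handling for a transmitted descriptor: unmap the buffers it
 * referenced (single mapping for non-TSO or the first TSO fragment, page
 * mappings otherwise), wake the subqueue if it had been stopped, and free
 * and account the skb when it was attached as the callback argument.
 */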
static void ionic_tx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info, void *cb_arg)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = desc_info->desc;
	struct device *dev = q->lif->ionic->dev;
	u8 opcode, flags, nsge;
	u16 queue_index;
	unsigned int i;
	u64 addr;

	decode_txq_desc_cmd(le64_to_cpu(desc->cmd),
			    &opcode, &flags, &nsge, &addr);

	/* use unmap_single only if either this is not TSO,
	 * or this is first descriptor of a TSO
	 */
	if (opcode != IONIC_TXQ_DESC_OPCODE_TSO ||
	    flags & IONIC_TXQ_DESC_FLAG_TSO_SOT)
		dma_unmap_single(dev, (dma_addr_t)addr,
				 le16_to_cpu(desc->len), DMA_TO_DEVICE);
	else
		dma_unmap_page(dev, (dma_addr_t)addr,
			       le16_to_cpu(desc->len), DMA_TO_DEVICE);

	for (i = 0; i < nsge; i++, elem++)
		dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
			       le16_to_cpu(elem->len), DMA_TO_DEVICE);

	if (cb_arg) {
		struct sk_buff *skb = cb_arg;
		u32 len = skb->len;

		queue_index = skb_get_queue_mapping(skb);
		if (unlikely(__netif_subqueue_stopped(q->lif->netdev,
						      queue_index))) {
			netif_wake_subqueue(q->lif->netdev, queue_index);
			q->wake++;
		}
		dev_kfree_skb_any(skb);
		stats->clean++;
		netdev_tx_completed_queue(q_to_ndq(q), 1, len);
	}
}

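/* Walk the tx completion queue; each completion can retire several queue
 * entries, up to and including the one named by comp->comp_index.  Interrupt
 * credits are returned for the completions processed.
 */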
void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_txq_comp *comp = cq->tail->cq_desc;
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	unsigned int work_done = 0;

	/* walk the completed cq entries */
	while (work_done < cq->num_descs &&
	       color_match(comp->color, cq->done_color)) {

		/* clean the related q entries, there could be
		 * several q entries completed for each cq completion
		 */
		do {
			desc_info = q->tail;
			q->tail = desc_info->next;
			ionic_tx_clean(q, desc_info, cq->tail,
				       desc_info->cb_arg);
			desc_info->cb = NULL;
			desc_info->cb_arg = NULL;
		} while (desc_info->index != le16_to_cpu(comp->comp_index));

		if (cq->tail->last)
			cq->done_color = !cq->done_color;

		cq->tail = cq->tail->next;
		comp = cq->tail->cq_desc;
		DEBUG_STATS_CQE_CNT(cq);

		work_done++;
	}

	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, 0);
}

static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (done) {
		skb_tx_timestamp(skb);
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, false, ionic_tx_clean, NULL);
	}
}

static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
						struct ionic_txq_sg_elem **elem)
{
	struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
	struct ionic_txq_desc *desc = q->head->desc;

	*elem = sg_desc->elems;
	return desc;
}

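/* Segment a GSO skb into TSO descriptors: each descriptor (plus its sg
 * elements) covers at most one MSS of payload, pulled first from skb->data
 * and then from the page frags.  On a DMA mapping failure the descriptors
 * already built are unwound and the queue head is restored.
 */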
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *abort = q->head;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_desc_info *rewind = abort;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int frag_left = 0;
	unsigned int offset = 0;
	unsigned int len_left;
	dma_addr_t desc_addr;
	unsigned int hdrlen;
	unsigned int nfrags;
	unsigned int seglen;
	u64 total_bytes = 0;
	u64 total_pkts = 0;
	unsigned int left;
	unsigned int len;
	unsigned int mss;
	skb_frag_t *frag;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	mss = skb_shinfo(skb)->gso_size;
	nfrags = skb_shinfo(skb)->nr_frags;
	len_left = skb->len - skb_headlen(skb);
	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err)
		return err;

	if (encap)
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			 inner_tcp_hdrlen(skb);
	else
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

	seglen = hdrlen + mss;
	left = skb_headlen(skb);

	desc = ionic_tx_tso_next(q, &elem);
	start = true;

	/* Chop skb->data up into desc segments */

	while (left > 0) {
		len = min(seglen, left);
		frag_left = seglen - len;
		desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
		if (dma_mapping_error(dev, desc_addr))
			goto err_out_abort;
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (nfrags > 0 && frag_left > 0)
			continue;
		done = (nfrags == 0 && left == 0);
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss,
				  outer_csum,
				  vlan_tci, has_vlan,
				  start, done);
		total_pkts++;
		total_bytes += start ? len : len + hdrlen;
		desc = ionic_tx_tso_next(q, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop skb frags into desc segments */

	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		offset = 0;
		left = skb_frag_size(frag);
		len_left -= left;
		nfrags--;
		stats->frags++;

		while (left > 0) {
			if (frag_left > 0) {
				len = min(frag_left, left);
				frag_left -= len;
				elem->addr =
				    cpu_to_le64(ionic_tx_map_frag(q, frag,
								  offset, len));
				if (dma_mapping_error(dev, elem->addr))
					goto err_out_abort;
				elem->len = cpu_to_le16(len);
				elem++;
				desc_nsge++;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			} else {
				len = min(mss, left);
				frag_left = mss - len;
				desc_addr = ionic_tx_map_frag(q, frag,
							      offset, len);
				if (dma_mapping_error(dev, desc_addr))
					goto err_out_abort;
				desc_len = len;
				desc_nsge = 0;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			}
		}
	}

	stats->pkts += total_pkts;
	stats->bytes += total_bytes;
	stats->tso++;

	return 0;

err_out_abort:
	while (rewind->desc != q->head->desc) {
		ionic_tx_clean(q, rewind, NULL, NULL);
		rewind = rewind->next;
	}
	q->head = abort;

	return -ENOMEM;
}

static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = q->head->desc;
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);

	if (skb->csum_not_inet)
		stats->crc32_csum++;
	else
		stats->csum++;

	return 0;
}

static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = q->head->desc;
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));

	stats->no_csum++;

	return 0;
}

static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
	unsigned int len_left = skb->len - skb_headlen(skb);
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	skb_frag_t *frag;
	u16 len;

	for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) {
		len = skb_frag_size(frag);
		elem->len = cpu_to_le16(len);
		dma_addr = ionic_tx_map_frag(q, frag, 0, len);
		if (dma_mapping_error(dev, dma_addr))
			return -ENOMEM;
		elem->addr = cpu_to_le64(dma_addr);
		len_left -= len;
		stats->frags++;
	}

	return 0;
}

static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = ionic_tx_calc_csum(q, skb);
	else
		err = ionic_tx_calc_no_csum(q, skb);
	if (err)
		return err;

	/* add frags */
	err = ionic_tx_skb_frags(q, skb);
	if (err)
		return err;

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

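/* Estimate how many descriptors the skb needs: roughly one per MSS for TSO,
 * otherwise a single descriptor, linearizing the skb first if it carries
 * more frags than the sg list can describe.
 */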
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	/* If TSO, need roundup(skb->len/mss) descs */
	if (skb_is_gso(skb))
		return (skb->len / skb_shinfo(skb)->gso_size) + 1;

	/* If non-TSO, just need 1 desc and nr_frags sg elems */
	if (skb_shinfo(skb)->nr_frags <= IONIC_TX_MAX_SG_ELEMS)
		return 1;

	/* Too many frags, so linearize */
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

	/* Need 1 desc and zero sg elems */
	return 1;
}

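/* Stop the subqueue if there is not enough descriptor space, then re-check
 * after a read barrier in case ionic_tx_clean() freed space in the meantime.
 */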
static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		q->stop++;
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

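/* ndo_start_xmit entry point: drop the skb if the LIF is down, pick the tx
 * queue, make sure enough descriptors are available (returning
 * NETDEV_TX_BUSY if not), then send via the TSO or the regular path.
 */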
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(!lif_to_txqcq(lif, queue_index)))
		queue_index = 0;
	q = lif_to_txq(lif, queue_index);

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->stop++;
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}