Commit | Line | Data |
---|---|---|
0f3154e6 SN |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ | |
3 | ||
4 | #include <linux/ip.h> | |
5 | #include <linux/ipv6.h> | |
6 | #include <linux/if_vlan.h> | |
7 | #include <net/ip6_checksum.h> | |
8 | ||
9 | #include "ionic.h" | |
10 | #include "ionic_lif.h" | |
11 | #include "ionic_txrx.h" | |
12 | ||
0f3154e6 SN |
/* Post a filled Tx descriptor to the queue via the generic q_post path;
 * cb_func/cb_arg are recorded for the completion handler, and the
 * doorbell is rung only when ring_dbell is set (callers batch postings).
 */
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
18 | ||
/* Post a filled Rx descriptor; identical pass-through to ionic_q_post()
 * as the Tx variant, kept separate for symmetry and future divergence.
 */
static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
24 | ||
b69585bf AH |
25 | bool ionic_txq_poke_doorbell(struct ionic_queue *q) |
26 | { | |
27 | unsigned long now, then, dif; | |
28 | struct netdev_queue *netdev_txq; | |
29 | struct net_device *netdev; | |
30 | ||
31 | netdev = q->lif->netdev; | |
32 | netdev_txq = netdev_get_tx_queue(netdev, q->index); | |
33 | ||
34 | HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id()); | |
35 | ||
36 | if (q->tail_idx == q->head_idx) { | |
37 | HARD_TX_UNLOCK(netdev, netdev_txq); | |
38 | return false; | |
39 | } | |
40 | ||
41 | now = READ_ONCE(jiffies); | |
42 | then = q->dbell_jiffies; | |
43 | dif = now - then; | |
44 | ||
45 | if (dif > q->dbell_deadline) { | |
46 | ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type, | |
47 | q->dbval | q->head_idx); | |
48 | ||
49 | q->dbell_jiffies = now; | |
50 | } | |
51 | ||
52 | HARD_TX_UNLOCK(netdev, netdev_txq); | |
53 | ||
54 | return true; | |
55 | } | |
56 | ||
57 | bool ionic_rxq_poke_doorbell(struct ionic_queue *q) | |
58 | { | |
59 | unsigned long now, then, dif; | |
60 | ||
61 | /* no lock, called from rx napi or txrx napi, nothing else can fill */ | |
62 | ||
63 | if (q->tail_idx == q->head_idx) | |
64 | return false; | |
65 | ||
66 | now = READ_ONCE(jiffies); | |
67 | then = q->dbell_jiffies; | |
68 | dif = now - then; | |
69 | ||
70 | if (dif > q->dbell_deadline) { | |
71 | ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type, | |
72 | q->dbval | q->head_idx); | |
73 | ||
74 | q->dbell_jiffies = now; | |
75 | ||
76 | dif = 2 * q->dbell_deadline; | |
77 | if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE) | |
78 | dif = IONIC_RX_MAX_DOORBELL_DEADLINE; | |
79 | ||
80 | q->dbell_deadline = dif; | |
81 | } | |
82 | ||
83 | return true; | |
84 | } | |
85 | ||
0f3154e6 SN |
/* Return the kernel netdev_queue paired with this ionic Tx queue
 * (queues are matched by index on the lif's netdev).
 */
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}
90 | ||
2b5720f2 | 91 | static int ionic_rx_page_alloc(struct ionic_queue *q, |
4b0a7539 | 92 | struct ionic_buf_info *buf_info) |
2b5720f2 | 93 | { |
89e572e7 | 94 | struct net_device *netdev = q->lif->netdev; |
2b5720f2 | 95 | struct ionic_rx_stats *stats; |
2b5720f2 | 96 | struct device *dev; |
e75ccac1 | 97 | struct page *page; |
2b5720f2 | 98 | |
f37bc346 | 99 | dev = q->dev; |
2b5720f2 SN |
100 | stats = q_to_rx_stats(q); |
101 | ||
4b0a7539 SN |
102 | if (unlikely(!buf_info)) { |
103 | net_err_ratelimited("%s: %s invalid buf_info in alloc\n", | |
2b5720f2 SN |
104 | netdev->name, q->name); |
105 | return -EINVAL; | |
106 | } | |
107 | ||
e75ccac1 SN |
108 | page = alloc_pages(IONIC_PAGE_GFP_MASK, 0); |
109 | if (unlikely(!page)) { | |
2b5720f2 SN |
110 | net_err_ratelimited("%s: %s page alloc failed\n", |
111 | netdev->name, q->name); | |
112 | stats->alloc_err++; | |
113 | return -ENOMEM; | |
114 | } | |
115 | ||
e75ccac1 | 116 | buf_info->dma_addr = dma_map_page(dev, page, 0, |
4b0a7539 SN |
117 | IONIC_PAGE_SIZE, DMA_FROM_DEVICE); |
118 | if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) { | |
e75ccac1 | 119 | __free_pages(page, 0); |
2b5720f2 SN |
120 | net_err_ratelimited("%s: %s dma map failed\n", |
121 | netdev->name, q->name); | |
122 | stats->dma_map_err++; | |
123 | return -EIO; | |
124 | } | |
125 | ||
e75ccac1 SN |
126 | buf_info->page = page; |
127 | buf_info->page_offset = 0; | |
128 | ||
2b5720f2 SN |
129 | return 0; |
130 | } | |
131 | ||
132 | static void ionic_rx_page_free(struct ionic_queue *q, | |
4b0a7539 | 133 | struct ionic_buf_info *buf_info) |
2b5720f2 | 134 | { |
4b0a7539 | 135 | struct net_device *netdev = q->lif->netdev; |
f37bc346 | 136 | struct device *dev = q->dev; |
2b5720f2 | 137 | |
4b0a7539 SN |
138 | if (unlikely(!buf_info)) { |
139 | net_err_ratelimited("%s: %s invalid buf_info in free\n", | |
2b5720f2 SN |
140 | netdev->name, q->name); |
141 | return; | |
142 | } | |
143 | ||
4b0a7539 | 144 | if (!buf_info->page) |
2b5720f2 | 145 | return; |
2b5720f2 | 146 | |
4b0a7539 SN |
147 | dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE); |
148 | __free_pages(buf_info->page, 0); | |
e75ccac1 | 149 | buf_info->page = NULL; |
4b0a7539 SN |
150 | } |
151 | ||
152 | static bool ionic_rx_buf_recycle(struct ionic_queue *q, | |
153 | struct ionic_buf_info *buf_info, u32 used) | |
154 | { | |
155 | u32 size; | |
156 | ||
157 | /* don't re-use pages allocated in low-mem condition */ | |
158 | if (page_is_pfmemalloc(buf_info->page)) | |
159 | return false; | |
160 | ||
161 | /* don't re-use buffers from non-local numa nodes */ | |
162 | if (page_to_nid(buf_info->page) != numa_mem_id()) | |
163 | return false; | |
164 | ||
165 | size = ALIGN(used, IONIC_PAGE_SPLIT_SZ); | |
166 | buf_info->page_offset += size; | |
167 | if (buf_info->page_offset >= IONIC_PAGE_SIZE) | |
168 | return false; | |
169 | ||
170 | get_page(buf_info->page); | |
2b5720f2 | 171 | |
4b0a7539 | 172 | return true; |
2b5720f2 SN |
173 | } |
174 | ||
08f2e4b2 SN |
/* Build a frag-based skb from the received buffers of one completion.
 * Each buffer (main desc plus comp->num_sg_elems sg buffers) is synced
 * for CPU access and attached to the skb as a page fragment; buffers
 * that cannot be recycled are unmapped and handed over to the skb.
 * Returns NULL on allocation failure or if a buffer slot is
 * unexpectedly empty.
 */
static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	prefetchw(buf_info->page);

	skb = napi_get_frags(&q_to_qcq(q)->napi);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	/* one buffer for the main desc plus one per sg element */
	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!buf_info->page)) {
			dev_kfree_skb(skb);
			return NULL;
		}

		/* this frag gets whatever is left in the current page
		 * segment, bounded by the remaining packet length
		 */
		frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
						 IONIC_PAGE_SIZE - buf_info->page_offset));
		len -= frag_len;

		dma_sync_single_for_cpu(dev,
					buf_info->dma_addr + buf_info->page_offset,
					frag_len, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				buf_info->page, buf_info->page_offset, frag_len,
				IONIC_PAGE_SIZE);

		/* if the page can't be reused, its mapping is released
		 * here and the page ref passes to the skb
		 */
		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
			dma_unmap_page(dev, buf_info->dma_addr,
				       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
			buf_info->page = NULL;
		}

		buf_info++;

		i--;
	} while (i > 0);

	return skb;
}
235 | ||
/* Small-packet receive path: copy the packet out of the Rx buffer into
 * a freshly allocated linear skb so the buffer itself can stay mapped
 * and be reused.  The buffer is synced back to the device after the
 * copy.  Returns NULL on allocation failure or an empty buffer slot.
 */
static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	if (unlikely(!buf_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	/* sync for CPU, copy, then give the buffer back to the device */
	dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
	dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}
276 | ||
5b3f3f2a SN |
/* Per-descriptor Rx completion handler: build the skb (copybreak for
 * small packets, page frags otherwise), apply hardware offload results
 * (RSS hash, checksum, VLAN strip, hw timestamp) from the completion
 * descriptor, and hand the packet to GRO.  Errors and allocation
 * failures are counted as drops.
 */
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct ionic_rxq_comp *comp;
	struct sk_buff *skb;

	/* the rxq completion sits at the tail end of the cq descriptor */
	comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);

	stats = q_to_rx_stats(q);

	if (comp->status) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	/* small packets are copied into a linear skb, larger ones are
	 * attached as page frags
	 */
	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, comp);
	else
		skb = ionic_rx_frags(q, desc_info, comp);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	/* hardware reports a full ones-complement sum, not just "OK" */
	if (likely(netdev->features & NETIF_F_RXCSUM) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
		stats->csum_complete++;
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
		__le64 *cq_desc_hwstamp;
		u64 hwstamp;

		/* the raw timestamp sits just before the completion in
		 * the cq descriptor
		 */
		cq_desc_hwstamp =
			cq_info->cq_desc +
			qcq->cq.desc_size -
			sizeof(struct ionic_rxq_comp) -
			IONIC_HWSTAMP_CQ_NEGOFFSET;

		hwstamp = le64_to_cpu(*cq_desc_hwstamp);

		if (hwstamp != IONIC_HWSTAMP_INVALID) {
			skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
			stats->hwstamp_valid++;
		} else {
			stats->hwstamp_invalid++;
		}
	}

	/* frag-built skbs must go through the frags variant of GRO */
	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}
375 | ||
a8771bfe | 376 | bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) |
0f3154e6 | 377 | { |
0f3154e6 SN |
378 | struct ionic_queue *q = cq->bound_q; |
379 | struct ionic_desc_info *desc_info; | |
0ec9f666 SN |
380 | struct ionic_rxq_comp *comp; |
381 | ||
382 | comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp); | |
0f3154e6 SN |
383 | |
384 | if (!color_match(comp->pkt_type_color, cq->done_color)) | |
385 | return false; | |
386 | ||
387 | /* check for empty queue */ | |
f1d2e894 | 388 | if (q->tail_idx == q->head_idx) |
0f3154e6 SN |
389 | return false; |
390 | ||
339dcf7f | 391 | if (q->tail_idx != le16_to_cpu(comp->comp_index)) |
0f3154e6 SN |
392 | return false; |
393 | ||
339dcf7f | 394 | desc_info = &q->info[q->tail_idx]; |
f1d2e894 | 395 | q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); |
0f3154e6 SN |
396 | |
397 | /* clean the related q entry, only one per qc completion */ | |
398 | ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg); | |
399 | ||
400 | desc_info->cb = NULL; | |
401 | desc_info->cb_arg = NULL; | |
402 | ||
403 | return true; | |
404 | } | |
405 | ||
40bc471d SN |
406 | static inline void ionic_write_cmb_desc(struct ionic_queue *q, |
407 | void __iomem *cmb_desc, | |
408 | void *desc) | |
409 | { | |
410 | if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS) | |
411 | memcpy_toio(cmb_desc, desc, q->desc_size); | |
412 | } | |
413 | ||
0f3154e6 SN |
/* Refill the Rx ring with buffers sized for the current MTU.  Skips
 * the refill entirely while fewer than fill_threshold slots are free,
 * to amortize the cost.  Each descriptor gets a main buffer plus as
 * many sg buffers as needed to cover the MTU; pages are reused in
 * place when still present.  On allocation failure the current
 * descriptor is zeroed and the fill stops early.  Finishes by ringing
 * the doorbell, resetting the doorbell deadline, and arming the napi
 * deadline timer.
 */
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_buf_info *buf_info;
	unsigned int fill_threshold;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int frag_len;
	unsigned int nfrags;
	unsigned int n_fill;
	unsigned int i, j;
	unsigned int len;

	n_fill = ionic_q_space_avail(q);

	fill_threshold = min_t(unsigned int, IONIC_RX_FILL_THRESHOLD,
			       q->num_descs / IONIC_RX_FILL_DIV);
	if (n_fill < fill_threshold)
		return;

	/* buffer space needed per packet at the current MTU */
	len = netdev->mtu + ETH_HLEN + VLAN_HLEN;

	for (i = n_fill; i; i--) {
		nfrags = 0;
		remain_len = len;
		desc_info = &q->info[q->head_idx];
		desc = desc_info->desc;
		buf_info = &desc_info->bufs[0];

		if (!buf_info->page) { /* alloc a new buffer? */
			if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
				desc->addr = 0;
				desc->len = 0;
				return;
			}
		}

		/* fill main descriptor - buf[0] */
		desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
		frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
						 IONIC_PAGE_SIZE - buf_info->page_offset));
		desc->len = cpu_to_le16(frag_len);
		remain_len -= frag_len;
		buf_info++;
		nfrags++;

		/* fill sg descriptors - buf[1..n] */
		sg_desc = desc_info->sg_desc;
		for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
			sg_elem = &sg_desc->elems[j];
			if (!buf_info->page) { /* alloc a new sg buffer? */
				if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
					sg_elem->addr = 0;
					sg_elem->len = 0;
					return;
				}
			}

			sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
			frag_len = min_t(u16, remain_len, min_t(u32, IONIC_MAX_BUF_LEN,
								IONIC_PAGE_SIZE -
								buf_info->page_offset));
			sg_elem->len = cpu_to_le16(frag_len);
			remain_len -= frag_len;
			buf_info++;
			nfrags++;
		}

		/* clear end sg element as a sentinel */
		if (j < q->max_sg_elems) {
			sg_elem = &sg_desc->elems[j];
			memset(sg_elem, 0, sizeof(*sg_elem));
		}

		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->nbufs = nfrags;

		ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);

		/* post without ringing; one doorbell covers the batch */
		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
	}

	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head_idx);

	/* fresh fill: restart the doorbell backoff at its minimum */
	q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
		  jiffies + IONIC_NAPI_DEADLINE);
}
509 | ||
0f3154e6 SN |
510 | void ionic_rx_empty(struct ionic_queue *q) |
511 | { | |
f1d2e894 | 512 | struct ionic_desc_info *desc_info; |
4b0a7539 | 513 | struct ionic_buf_info *buf_info; |
0c32a28e | 514 | unsigned int i, j; |
08f2e4b2 | 515 | |
0c32a28e SN |
516 | for (i = 0; i < q->num_descs; i++) { |
517 | desc_info = &q->info[i]; | |
518 | for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) { | |
4b0a7539 SN |
519 | buf_info = &desc_info->bufs[j]; |
520 | if (buf_info->page) | |
521 | ionic_rx_page_free(q, buf_info); | |
0c32a28e | 522 | } |
08f2e4b2 | 523 | |
4b0a7539 | 524 | desc_info->nbufs = 0; |
0c32a28e | 525 | desc_info->cb = NULL; |
f1d2e894 | 526 | desc_info->cb_arg = NULL; |
0f3154e6 | 527 | } |
4b0a7539 SN |
528 | |
529 | q->head_idx = 0; | |
530 | q->tail_idx = 0; | |
0f3154e6 SN |
531 | } |
532 | ||
76ed8a4a | 533 | static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode) |
04a83459 SN |
534 | { |
535 | struct dim_sample dim_sample; | |
536 | struct ionic_lif *lif; | |
537 | unsigned int qi; | |
76ed8a4a | 538 | u64 pkts, bytes; |
04a83459 SN |
539 | |
540 | if (!qcq->intr.dim_coal_hw) | |
541 | return; | |
542 | ||
543 | lif = qcq->q.lif; | |
544 | qi = qcq->cq.bound_q->index; | |
545 | ||
76ed8a4a SN |
546 | switch (napi_mode) { |
547 | case IONIC_LIF_F_TX_DIM_INTR: | |
548 | pkts = lif->txqstats[qi].pkts; | |
549 | bytes = lif->txqstats[qi].bytes; | |
550 | break; | |
551 | case IONIC_LIF_F_RX_DIM_INTR: | |
552 | pkts = lif->rxqstats[qi].pkts; | |
553 | bytes = lif->rxqstats[qi].bytes; | |
554 | break; | |
555 | default: | |
556 | pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts; | |
557 | bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes; | |
558 | break; | |
559 | } | |
04a83459 SN |
560 | |
561 | dim_update_sample(qcq->cq.bound_intr->rearm_count, | |
76ed8a4a | 562 | pkts, bytes, &dim_sample); |
04a83459 SN |
563 | |
564 | net_dim(&qcq->dim, dim_sample); | |
565 | } | |
566 | ||
fe8c30b5 SN |
/* NAPI poll handler for a Tx-only interrupt: service completed Tx
 * descriptors up to the budget, and when work is finished re-enable
 * the interrupt and return credits.  If no work was done but the queue
 * is still non-empty, poke the doorbell and re-arm the deadline timer
 * as a recovery against a missed doorbell.
 */
int ionic_tx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_tx_service, NULL, NULL);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	if (!work_done && ionic_txq_poke_doorbell(&qcq->q))
		mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return work_done;
}
600 | ||
/* NAPI poll handler for an Rx-only interrupt: service received packets
 * up to the budget, refill the Rx ring, then (when under budget)
 * complete napi, update DIM, and return interrupt credits.  If no work
 * was done but descriptors remain outstanding, poke the Rx doorbell
 * and re-arm the deadline timer.
 */
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_rx_service, NULL, NULL);

	/* replenish the buffers consumed above */
	ionic_rx_fill(cq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	if (!work_done && ionic_rxq_poke_doorbell(&qcq->q))
		mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return work_done;
}
636 | ||
/* NAPI poll handler for a shared Tx/Rx interrupt: service the paired
 * Tx cq with a fixed budget, then the Rx cq with the napi budget,
 * refill Rx, and manage interrupt credits on the shared interrupt.
 * Either idle-but-nonempty queue gets its doorbell poked, and the
 * deadline timer is re-armed once if so.  Returns the Rx work done,
 * as required by the napi contract.
 */
int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *rxqcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_qcq *txqcq;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	bool resched = false;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txqcq = lif->txqcqs[qi];
	txcq = &lif->txqcqs[qi]->cq;

	/* tx gets its own fixed budget so rx can't starve it */
	tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
					ionic_tx_service, NULL, NULL);

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);

	ionic_rx_fill(rxcq->bound_q);

	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		ionic_dim_update(rxqcq, 0);
		flags |= IONIC_INTR_CRED_UNMASK;
		rxcq->bound_intr->rearm_count++;
	}

	if (rx_work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		/* both queues share one interrupt; credit the sum */
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q))
		resched = true;
	if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q))
		resched = true;
	if (resched)
		mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return rx_work_done;
}
685 | ||
5b3f3f2a SN |
686 | static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, |
687 | void *data, size_t len) | |
0f3154e6 SN |
688 | { |
689 | struct ionic_tx_stats *stats = q_to_tx_stats(q); | |
f37bc346 | 690 | struct device *dev = q->dev; |
0f3154e6 SN |
691 | dma_addr_t dma_addr; |
692 | ||
693 | dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE); | |
694 | if (dma_mapping_error(dev, dma_addr)) { | |
695 | net_warn_ratelimited("%s: DMA single map failed on %s!\n", | |
696 | q->lif->netdev->name, q->name); | |
697 | stats->dma_map_err++; | |
698 | return 0; | |
699 | } | |
700 | return dma_addr; | |
701 | } | |
702 | ||
5b3f3f2a SN |
703 | static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, |
704 | const skb_frag_t *frag, | |
0f3154e6 SN |
705 | size_t offset, size_t len) |
706 | { | |
707 | struct ionic_tx_stats *stats = q_to_tx_stats(q); | |
f37bc346 | 708 | struct device *dev = q->dev; |
0f3154e6 SN |
709 | dma_addr_t dma_addr; |
710 | ||
711 | dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE); | |
712 | if (dma_mapping_error(dev, dma_addr)) { | |
713 | net_warn_ratelimited("%s: DMA frag map failed on %s!\n", | |
714 | q->lif->netdev->name, q->name); | |
715 | stats->dma_map_err++; | |
716 | } | |
717 | return dma_addr; | |
718 | } | |
719 | ||
2da479ca SN |
720 | static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb, |
721 | struct ionic_desc_info *desc_info) | |
5b039241 | 722 | { |
2da479ca | 723 | struct ionic_buf_info *buf_info = desc_info->bufs; |
0f4e7f4e | 724 | struct ionic_tx_stats *stats = q_to_tx_stats(q); |
5b039241 SN |
725 | struct device *dev = q->dev; |
726 | dma_addr_t dma_addr; | |
2da479ca | 727 | unsigned int nfrags; |
5b039241 SN |
728 | skb_frag_t *frag; |
729 | int frag_idx; | |
730 | ||
731 | dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); | |
0f4e7f4e SN |
732 | if (dma_mapping_error(dev, dma_addr)) { |
733 | stats->dma_map_err++; | |
5b039241 | 734 | return -EIO; |
0f4e7f4e | 735 | } |
5b039241 SN |
736 | buf_info->dma_addr = dma_addr; |
737 | buf_info->len = skb_headlen(skb); | |
738 | buf_info++; | |
739 | ||
2da479ca SN |
740 | frag = skb_shinfo(skb)->frags; |
741 | nfrags = skb_shinfo(skb)->nr_frags; | |
742 | for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) { | |
5b039241 | 743 | dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag)); |
0f4e7f4e SN |
744 | if (dma_mapping_error(dev, dma_addr)) { |
745 | stats->dma_map_err++; | |
5b039241 | 746 | goto dma_fail; |
0f4e7f4e | 747 | } |
5b039241 SN |
748 | buf_info->dma_addr = dma_addr; |
749 | buf_info->len = skb_frag_size(frag); | |
2da479ca | 750 | buf_info++; |
5b039241 SN |
751 | } |
752 | ||
2da479ca SN |
753 | desc_info->nbufs = 1 + nfrags; |
754 | ||
5b039241 SN |
755 | return 0; |
756 | ||
757 | dma_fail: | |
758 | /* unwind the frag mappings and the head mapping */ | |
759 | while (frag_idx > 0) { | |
760 | frag_idx--; | |
761 | buf_info--; | |
762 | dma_unmap_page(dev, buf_info->dma_addr, | |
763 | buf_info->len, DMA_TO_DEVICE); | |
764 | } | |
765 | dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE); | |
766 | return -EIO; | |
767 | } | |
768 | ||
238a0f7c BC |
769 | static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q, |
770 | struct ionic_desc_info *desc_info) | |
771 | { | |
772 | struct ionic_buf_info *buf_info = desc_info->bufs; | |
773 | struct device *dev = q->dev; | |
774 | unsigned int i; | |
775 | ||
776 | if (!desc_info->nbufs) | |
777 | return; | |
778 | ||
779 | dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr, | |
780 | buf_info->len, DMA_TO_DEVICE); | |
781 | buf_info++; | |
782 | for (i = 1; i < desc_info->nbufs; i++, buf_info++) | |
783 | dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr, | |
784 | buf_info->len, DMA_TO_DEVICE); | |
785 | ||
786 | desc_info->nbufs = 0; | |
787 | } | |
788 | ||
5b3f3f2a SN |
/* Per-descriptor Tx completion handler: unmap the buffers, then for a
 * real skb (cb_arg may be NULL) either deliver the hardware transmit
 * timestamp (hwstamp queues) or wake the subqueue if it had been
 * stopped for lack of space, record the byte count for BQL accounting
 * by the caller, and release the skb.
 */
static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct sk_buff *skb = cb_arg;
	u16 qi;

	ionic_tx_desc_unmap_bufs(q, desc_info);

	if (!skb)
		return;

	qi = skb_get_queue_mapping(skb);

	if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) {
		/* cq_info is NULL when called from ionic_tx_empty() -
		 * no completion, so no timestamp to report
		 */
		if (cq_info) {
			struct skb_shared_hwtstamps hwts = {};
			__le64 *cq_desc_hwstamp;
			u64 hwstamp;

			/* raw timestamp precedes the completion struct
			 * at the end of the cq descriptor
			 */
			cq_desc_hwstamp =
				cq_info->cq_desc +
				qcq->cq.desc_size -
				sizeof(struct ionic_txq_comp) -
				IONIC_HWSTAMP_CQ_NEGOFFSET;

			hwstamp = le64_to_cpu(*cq_desc_hwstamp);

			if (hwstamp != IONIC_HWSTAMP_INVALID) {
				hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);

				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
				skb_tstamp_tx(skb, &hwts);

				stats->hwstamp_valid++;
			} else {
				stats->hwstamp_invalid++;
			}
		}

	} else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
		netif_wake_subqueue(q->lif->netdev, qi);
	}

	/* stash the length for the caller's BQL bookkeeping */
	desc_info->bytes = skb->len;
	stats->clean++;

	dev_consume_skb_any(skb);
}
841 | ||
/* Service one Tx completion, which may cover several queue entries:
 * walk the ring tail forward, cleaning each entry, until the entry
 * named by comp->comp_index has been processed.  Packets/bytes are
 * accumulated for BQL (skipped on hwstamp queues).  Returns false when
 * the completion's color bit says it is not yet valid.
 */
bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_txq_comp *comp;
	int bytes = 0;
	int pkts = 0;
	u16 index;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
		/* cb_arg is the skb; only real packets count for BQL */
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (index != le16_to_cpu(comp->comp_index));

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);

	return true;
}
878 | ||
879 | void ionic_tx_flush(struct ionic_cq *cq) | |
880 | { | |
881 | struct ionic_dev *idev = &cq->lif->ionic->idev; | |
882 | u32 work_done; | |
883 | ||
884 | work_done = ionic_cq_service(cq, cq->num_descs, | |
885 | ionic_tx_service, NULL, NULL); | |
0f3154e6 SN |
886 | if (work_done) |
887 | ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, | |
b14e4e95 | 888 | work_done, IONIC_INTR_CRED_RESET_COALESCE); |
0f3154e6 SN |
889 | } |
890 | ||
f9c00e2c SN |
891 | void ionic_tx_empty(struct ionic_queue *q) |
892 | { | |
893 | struct ionic_desc_info *desc_info; | |
633eddf1 SN |
894 | int bytes = 0; |
895 | int pkts = 0; | |
f9c00e2c SN |
896 | |
897 | /* walk the not completed tx entries, if any */ | |
f1d2e894 SN |
898 | while (q->head_idx != q->tail_idx) { |
899 | desc_info = &q->info[q->tail_idx]; | |
633eddf1 | 900 | desc_info->bytes = 0; |
f1d2e894 | 901 | q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); |
f9c00e2c | 902 | ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg); |
633eddf1 SN |
903 | if (desc_info->cb_arg) { |
904 | pkts++; | |
905 | bytes += desc_info->bytes; | |
906 | } | |
f9c00e2c SN |
907 | desc_info->cb = NULL; |
908 | desc_info->cb_arg = NULL; | |
f9c00e2c | 909 | } |
633eddf1 | 910 | |
a8771bfe | 911 | if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) |
633eddf1 | 912 | netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes); |
f9c00e2c SN |
913 | } |
914 | ||
0f3154e6 SN |
915 | static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb) |
916 | { | |
917 | int err; | |
918 | ||
919 | err = skb_cow_head(skb, 0); | |
920 | if (err) | |
921 | return err; | |
922 | ||
923 | if (skb->protocol == cpu_to_be16(ETH_P_IP)) { | |
924 | inner_ip_hdr(skb)->check = 0; | |
925 | inner_tcp_hdr(skb)->check = | |
926 | ~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr, | |
927 | inner_ip_hdr(skb)->daddr, | |
928 | 0, IPPROTO_TCP, 0); | |
929 | } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) { | |
930 | inner_tcp_hdr(skb)->check = | |
931 | ~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr, | |
932 | &inner_ipv6_hdr(skb)->daddr, | |
933 | 0, IPPROTO_TCP, 0); | |
934 | } | |
935 | ||
936 | return 0; | |
937 | } | |
938 | ||
939 | static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb) | |
940 | { | |
941 | int err; | |
942 | ||
943 | err = skb_cow_head(skb, 0); | |
944 | if (err) | |
945 | return err; | |
946 | ||
947 | if (skb->protocol == cpu_to_be16(ETH_P_IP)) { | |
948 | ip_hdr(skb)->check = 0; | |
949 | tcp_hdr(skb)->check = | |
950 | ~csum_tcpudp_magic(ip_hdr(skb)->saddr, | |
951 | ip_hdr(skb)->daddr, | |
952 | 0, IPPROTO_TCP, 0); | |
953 | } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) { | |
fa6b8429 | 954 | tcp_v6_gso_csum_prep(skb); |
0f3154e6 SN |
955 | } |
956 | ||
957 | return 0; | |
958 | } | |
959 | ||
40bc471d SN |
960 | static void ionic_tx_tso_post(struct ionic_queue *q, |
961 | struct ionic_desc_info *desc_info, | |
0f3154e6 SN |
962 | struct sk_buff *skb, |
963 | dma_addr_t addr, u8 nsge, u16 len, | |
964 | unsigned int hdrlen, unsigned int mss, | |
965 | bool outer_csum, | |
966 | u16 vlan_tci, bool has_vlan, | |
967 | bool start, bool done) | |
968 | { | |
40bc471d | 969 | struct ionic_txq_desc *desc = desc_info->desc; |
0f3154e6 SN |
970 | u8 flags = 0; |
971 | u64 cmd; | |
972 | ||
973 | flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; | |
974 | flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; | |
975 | flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0; | |
976 | flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0; | |
977 | ||
978 | cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr); | |
979 | desc->cmd = cpu_to_le64(cmd); | |
980 | desc->len = cpu_to_le16(len); | |
981 | desc->vlan_tci = cpu_to_le16(vlan_tci); | |
982 | desc->hdr_len = cpu_to_le16(hdrlen); | |
983 | desc->mss = cpu_to_le16(mss); | |
984 | ||
40bc471d SN |
985 | ionic_write_cmb_desc(q, desc_info->cmb_desc, desc); |
986 | ||
2da479ca | 987 | if (start) { |
0f3154e6 | 988 | skb_tx_timestamp(skb); |
a8771bfe SN |
989 | if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) |
990 | netdev_tx_sent_queue(q_to_ndq(q), skb->len); | |
2da479ca | 991 | ionic_txq_post(q, false, ionic_tx_clean, skb); |
0f3154e6 | 992 | } else { |
2da479ca | 993 | ionic_txq_post(q, done, NULL, NULL); |
0f3154e6 SN |
994 | } |
995 | } | |
996 | ||
/* ionic_tx_tso - map and post a GSO skb as a chain of TSO descriptors
 * @q:   Tx queue
 * @skb: GSO skb to transmit
 *
 * Maps the skb for DMA, preloads the TCP pseudo-header checksum, then
 * walks the mapped fragments carving them into descriptor-sized chunks:
 * each outer-loop iteration posts one descriptor covering up to one MSS
 * of payload (the first also covers the headers), spreading the chunk
 * across the main descriptor address plus SG elements as needed.
 *
 * Returns 0 on success, -EIO if DMA mapping fails, or the error from
 * the pseudo-checksum preload (mapping is undone in that case).
 */
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int chunk_len;
	unsigned int frag_rem;
	unsigned int tso_rem;
	unsigned int seg_rem;
	dma_addr_t desc_addr;
	dma_addr_t frag_addr;
	unsigned int hdrlen;
	unsigned int len;
	unsigned int mss;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	desc_info = &q->info[q->head_idx];
	buf_info = desc_info->bufs;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	len = skb->len;
	mss = skb_shinfo(skb)->gso_size;
	/* any tunnel GSO type needs outer-checksum (ENCAP) handling */
	outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
						   SKB_GSO_GRE_CSUM |
						   SKB_GSO_IPXIP4 |
						   SKB_GSO_IPXIP6 |
						   SKB_GSO_UDP_TUNNEL |
						   SKB_GSO_UDP_TUNNEL_CSUM));
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err) {
		/* clean up mapping from ionic_tx_map_skb */
		ionic_tx_desc_unmap_bufs(q, desc_info);
		return err;
	}

	if (encap)
		hdrlen = skb_inner_tcp_all_headers(skb);
	else
		hdrlen = skb_tcp_all_headers(skb);

	/* first segment carries the headers plus one MSS of payload */
	tso_rem = len;
	seg_rem = min(tso_rem, hdrlen + mss);

	frag_addr = 0;
	frag_rem = 0;

	start = true;

	while (tso_rem > 0) {
		desc = NULL;
		elem = NULL;
		desc_addr = 0;
		desc_len = 0;
		desc_nsge = 0;
		/* use fragments until we have enough to post a single descriptor */
		while (seg_rem > 0) {
			/* if the fragment is exhausted then move to the next one */
			if (frag_rem == 0) {
				/* grab the next fragment */
				frag_addr = buf_info->dma_addr;
				frag_rem = buf_info->len;
				buf_info++;
			}
			/* take as much as fits in both the fragment and the segment */
			chunk_len = min(frag_rem, seg_rem);
			if (!desc) {
				/* fill main descriptor */
				desc = desc_info->txq_desc;
				elem = desc_info->txq_sg_desc->elems;
				desc_addr = frag_addr;
				desc_len = chunk_len;
			} else {
				/* fill sg descriptor */
				elem->addr = cpu_to_le64(frag_addr);
				elem->len = cpu_to_le16(chunk_len);
				elem++;
				desc_nsge++;
			}
			frag_addr += chunk_len;
			frag_rem -= chunk_len;
			tso_rem -= chunk_len;
			seg_rem -= chunk_len;
		}
		/* subsequent segments are at most one MSS each */
		seg_rem = min(tso_rem, mss);
		done = (tso_rem == 0);
		/* post descriptor */
		ionic_tx_tso_post(q, desc_info, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
				  start, done);
		start = false;
		/* Buffer information is stored with the first tso descriptor */
		desc_info = &q->info[q->head_idx];
		desc_info->nbufs = 0;
	}

	stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
	stats->bytes += len;
	stats->tso++;
	stats->tso_bytes = len;

	return 0;
}
1122 | ||
238a0f7c BC |
1123 | static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb, |
1124 | struct ionic_desc_info *desc_info) | |
0f3154e6 | 1125 | { |
2da479ca SN |
1126 | struct ionic_txq_desc *desc = desc_info->txq_desc; |
1127 | struct ionic_buf_info *buf_info = desc_info->bufs; | |
0f3154e6 | 1128 | struct ionic_tx_stats *stats = q_to_tx_stats(q); |
0f3154e6 SN |
1129 | bool has_vlan; |
1130 | u8 flags = 0; | |
1131 | bool encap; | |
1132 | u64 cmd; | |
1133 | ||
1134 | has_vlan = !!skb_vlan_tag_present(skb); | |
1135 | encap = skb->encapsulation; | |
1136 | ||
0f3154e6 SN |
1137 | flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; |
1138 | flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; | |
1139 | ||
1140 | cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL, | |
2da479ca SN |
1141 | flags, skb_shinfo(skb)->nr_frags, |
1142 | buf_info->dma_addr); | |
0f3154e6 | 1143 | desc->cmd = cpu_to_le64(cmd); |
2da479ca | 1144 | desc->len = cpu_to_le16(buf_info->len); |
f64e0c56 SN |
1145 | if (has_vlan) { |
1146 | desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); | |
1147 | stats->vlan_inserted++; | |
2da479ca SN |
1148 | } else { |
1149 | desc->vlan_tci = 0; | |
f64e0c56 | 1150 | } |
2da479ca SN |
1151 | desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb)); |
1152 | desc->csum_offset = cpu_to_le16(skb->csum_offset); | |
0f3154e6 | 1153 | |
40bc471d SN |
1154 | ionic_write_cmb_desc(q, desc_info->cmb_desc, desc); |
1155 | ||
fa821170 | 1156 | if (skb_csum_is_sctp(skb)) |
0f3154e6 SN |
1157 | stats->crc32_csum++; |
1158 | else | |
1159 | stats->csum++; | |
0f3154e6 SN |
1160 | } |
1161 | ||
238a0f7c BC |
1162 | static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb, |
1163 | struct ionic_desc_info *desc_info) | |
0f3154e6 | 1164 | { |
2da479ca SN |
1165 | struct ionic_txq_desc *desc = desc_info->txq_desc; |
1166 | struct ionic_buf_info *buf_info = desc_info->bufs; | |
0f3154e6 | 1167 | struct ionic_tx_stats *stats = q_to_tx_stats(q); |
0f3154e6 SN |
1168 | bool has_vlan; |
1169 | u8 flags = 0; | |
1170 | bool encap; | |
1171 | u64 cmd; | |
1172 | ||
1173 | has_vlan = !!skb_vlan_tag_present(skb); | |
1174 | encap = skb->encapsulation; | |
1175 | ||
0f3154e6 SN |
1176 | flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; |
1177 | flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; | |
1178 | ||
1179 | cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE, | |
2da479ca SN |
1180 | flags, skb_shinfo(skb)->nr_frags, |
1181 | buf_info->dma_addr); | |
0f3154e6 | 1182 | desc->cmd = cpu_to_le64(cmd); |
2da479ca | 1183 | desc->len = cpu_to_le16(buf_info->len); |
f64e0c56 SN |
1184 | if (has_vlan) { |
1185 | desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); | |
1186 | stats->vlan_inserted++; | |
2da479ca SN |
1187 | } else { |
1188 | desc->vlan_tci = 0; | |
f64e0c56 | 1189 | } |
2da479ca SN |
1190 | desc->csum_start = 0; |
1191 | desc->csum_offset = 0; | |
0f3154e6 | 1192 | |
40bc471d SN |
1193 | ionic_write_cmb_desc(q, desc_info->cmb_desc, desc); |
1194 | ||
f64e0c56 | 1195 | stats->csum_none++; |
0f3154e6 SN |
1196 | } |
1197 | ||
238a0f7c BC |
1198 | static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb, |
1199 | struct ionic_desc_info *desc_info) | |
0f3154e6 | 1200 | { |
2da479ca SN |
1201 | struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc; |
1202 | struct ionic_buf_info *buf_info = &desc_info->bufs[1]; | |
0f3154e6 SN |
1203 | struct ionic_txq_sg_elem *elem = sg_desc->elems; |
1204 | struct ionic_tx_stats *stats = q_to_tx_stats(q); | |
2da479ca | 1205 | unsigned int i; |
0f3154e6 | 1206 | |
2da479ca SN |
1207 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) { |
1208 | elem->addr = cpu_to_le64(buf_info->dma_addr); | |
1209 | elem->len = cpu_to_le16(buf_info->len); | |
0f3154e6 SN |
1210 | } |
1211 | ||
2da479ca | 1212 | stats->frags += skb_shinfo(skb)->nr_frags; |
0f3154e6 SN |
1213 | } |
1214 | ||
1215 | static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb) | |
1216 | { | |
2da479ca | 1217 | struct ionic_desc_info *desc_info = &q->info[q->head_idx]; |
0f3154e6 | 1218 | struct ionic_tx_stats *stats = q_to_tx_stats(q); |
0f3154e6 | 1219 | |
2da479ca SN |
1220 | if (unlikely(ionic_tx_map_skb(q, skb, desc_info))) |
1221 | return -EIO; | |
1222 | ||
0f3154e6 SN |
1223 | /* set up the initial descriptor */ |
1224 | if (skb->ip_summed == CHECKSUM_PARTIAL) | |
238a0f7c | 1225 | ionic_tx_calc_csum(q, skb, desc_info); |
0f3154e6 | 1226 | else |
238a0f7c | 1227 | ionic_tx_calc_no_csum(q, skb, desc_info); |
0f3154e6 SN |
1228 | |
1229 | /* add frags */ | |
238a0f7c | 1230 | ionic_tx_skb_frags(q, skb, desc_info); |
0f3154e6 SN |
1231 | |
1232 | skb_tx_timestamp(skb); | |
1233 | stats->pkts++; | |
1234 | stats->bytes += skb->len; | |
1235 | ||
a8771bfe SN |
1236 | if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) |
1237 | netdev_tx_sent_queue(q_to_ndq(q), skb->len); | |
0f3154e6 SN |
1238 | ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb); |
1239 | ||
1240 | return 0; | |
1241 | } | |
1242 | ||
1243 | static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) | |
1244 | { | |
1245 | struct ionic_tx_stats *stats = q_to_tx_stats(q); | |
d2c21422 | 1246 | int ndescs; |
0f3154e6 SN |
1247 | int err; |
1248 | ||
d2c21422 | 1249 | /* Each desc is mss long max, so a descriptor for each gso_seg */ |
0f3154e6 | 1250 | if (skb_is_gso(skb)) |
d2c21422 SN |
1251 | ndescs = skb_shinfo(skb)->gso_segs; |
1252 | else | |
1253 | ndescs = 1; | |
0f3154e6 SN |
1254 | |
1255 | /* If non-TSO, just need 1 desc and nr_frags sg elems */ | |
f37bc346 | 1256 | if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems) |
d2c21422 | 1257 | return ndescs; |
0f3154e6 SN |
1258 | |
1259 | /* Too many frags, so linearize */ | |
1260 | err = skb_linearize(skb); | |
1261 | if (err) | |
1262 | return err; | |
1263 | ||
1264 | stats->linearize++; | |
1265 | ||
d2c21422 | 1266 | return ndescs; |
0f3154e6 SN |
1267 | } |
1268 | ||
/* ionic_maybe_stop_tx - stop the subqueue if @ndescs slots are not free
 * @q:      Tx queue to check
 * @ndescs: number of descriptors the caller is about to need
 *
 * Returns 1 if the subqueue was left stopped, 0 if there is room.
 * After stopping, the queue is re-checked: the cleaner may have freed
 * space concurrently, in which case the subqueue is woken again and 0
 * is returned.
 */
static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}
1287 | ||
a8771bfe SN |
1288 | static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb, |
1289 | struct net_device *netdev) | |
1290 | { | |
1291 | struct ionic_lif *lif = netdev_priv(netdev); | |
1292 | struct ionic_queue *q = &lif->hwstamp_txq->q; | |
1293 | int err, ndescs; | |
1294 | ||
1295 | /* Does not stop/start txq, because we post to a separate tx queue | |
1296 | * for timestamping, and if a packet can't be posted immediately to | |
1297 | * the timestamping queue, it is dropped. | |
1298 | */ | |
1299 | ||
1300 | ndescs = ionic_tx_descs_needed(q, skb); | |
1301 | if (unlikely(ndescs < 0)) | |
1302 | goto err_out_drop; | |
1303 | ||
1304 | if (unlikely(!ionic_q_has_space(q, ndescs))) | |
1305 | goto err_out_drop; | |
1306 | ||
bd7856bc | 1307 | skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP; |
a8771bfe SN |
1308 | if (skb_is_gso(skb)) |
1309 | err = ionic_tx_tso(q, skb); | |
1310 | else | |
1311 | err = ionic_tx(q, skb); | |
1312 | ||
1313 | if (err) | |
1314 | goto err_out_drop; | |
1315 | ||
1316 | return NETDEV_TX_OK; | |
1317 | ||
1318 | err_out_drop: | |
1319 | q->drop++; | |
1320 | dev_kfree_skb(skb); | |
1321 | return NETDEV_TX_OK; | |
1322 | } | |
1323 | ||
0f3154e6 SN |
1324 | netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev) |
1325 | { | |
1326 | u16 queue_index = skb_get_queue_mapping(skb); | |
1327 | struct ionic_lif *lif = netdev_priv(netdev); | |
1328 | struct ionic_queue *q; | |
1329 | int ndescs; | |
1330 | int err; | |
1331 | ||
c6d3d73a | 1332 | if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) { |
0f3154e6 SN |
1333 | dev_kfree_skb(skb); |
1334 | return NETDEV_TX_OK; | |
1335 | } | |
1336 | ||
a8771bfe | 1337 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) |
e2ce148e | 1338 | if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode) |
a8771bfe SN |
1339 | return ionic_start_hwstamp_xmit(skb, netdev); |
1340 | ||
34dec947 | 1341 | if (unlikely(queue_index >= lif->nxqs)) |
0f3154e6 | 1342 | queue_index = 0; |
34dec947 | 1343 | q = &lif->txqcqs[queue_index]->q; |
0f3154e6 SN |
1344 | |
1345 | ndescs = ionic_tx_descs_needed(q, skb); | |
1346 | if (ndescs < 0) | |
1347 | goto err_out_drop; | |
1348 | ||
1349 | if (unlikely(ionic_maybe_stop_tx(q, ndescs))) | |
1350 | return NETDEV_TX_BUSY; | |
1351 | ||
1352 | if (skb_is_gso(skb)) | |
1353 | err = ionic_tx_tso(q, skb); | |
1354 | else | |
1355 | err = ionic_tx(q, skb); | |
1356 | ||
1357 | if (err) | |
1358 | goto err_out_drop; | |
1359 | ||
1360 | /* Stop the queue if there aren't descriptors for the next packet. | |
1361 | * Since our SG lists per descriptor take care of most of the possible | |
1362 | * fragmentation, we don't need to have many descriptors available. | |
1363 | */ | |
1364 | ionic_maybe_stop_tx(q, 4); | |
1365 | ||
1366 | return NETDEV_TX_OK; | |
1367 | ||
1368 | err_out_drop: | |
0f3154e6 SN |
1369 | q->drop++; |
1370 | dev_kfree_skb(skb); | |
1371 | return NETDEV_TX_OK; | |
1372 | } |