1 | /* |
2 | * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet | |
3 | * driver for Linux. | |
4 | * | |
5 | * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. | |
6 | * | |
7 | * This software is available to you under a choice of one of two | |
8 | * licenses. You may choose to be licensed under the terms of the GNU | |
9 | * General Public License (GPL) Version 2, available from the file | |
10 | * COPYING in the main directory of this source tree, or the | |
11 | * OpenIB.org BSD license below: | |
12 | * | |
13 | * Redistribution and use in source and binary forms, with or | |
14 | * without modification, are permitted provided that the following | |
15 | * conditions are met: | |
16 | * | |
17 | * - Redistributions of source code must retain the above | |
18 | * copyright notice, this list of conditions and the following | |
19 | * disclaimer. | |
20 | * | |
21 | * - Redistributions in binary form must reproduce the above | |
22 | * copyright notice, this list of conditions and the following | |
23 | * disclaimer in the documentation and/or other materials | |
24 | * provided with the distribution. | |
25 | * | |
26 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
27 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
28 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
29 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
30 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
31 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
32 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
33 | * SOFTWARE. | |
34 | */ | |
35 | ||
36 | #include <linux/skbuff.h> | |
37 | #include <linux/netdevice.h> | |
38 | #include <linux/etherdevice.h> | |
39 | #include <linux/if_vlan.h> | |
40 | #include <linux/ip.h> | |
41 | #include <net/ipv6.h> | |
42 | #include <net/tcp.h> | |
43 | #include <linux/dma-mapping.h> | |
44 | #include <linux/prefetch.h> |
45 | |
46 | #include "t4vf_common.h" | |
47 | #include "t4vf_defs.h" | |
48 | ||
49 | #include "../cxgb4/t4_regs.h" | |
50 | #include "../cxgb4/t4fw_api.h" | |
51 | #include "../cxgb4/t4_msg.h" | |
52 | ||
53 | /* | |
54 | * Decoded Adapter Parameters. | |
55 | */ | |
56 | static u32 FL_PG_ORDER; /* large page allocation size */ | |
57 | static u32 STAT_LEN; /* length of status page at ring end */ | |
58 | static u32 PKTSHIFT; /* padding between CPL and packet data */ | |
59 | static u32 FL_ALIGN; /* response queue message alignment */ | |
60 | ||
61 | /* | |
62 | * Constants ... | |
63 | */ | |
64 | enum { | |
65 | /* | |
66 | * Egress Queue sizes, producer and consumer indices are all in units | |
67 | * of Egress Context Units bytes. Note that as far as the hardware is | |
68 | * concerned, the free list is an Egress Queue (the host produces free | |
69 | * buffers which the hardware consumes) and free list entries are | |
70 | * 64-bit PCI DMA addresses. | |
71 | */ | |
72 | EQ_UNIT = SGE_EQ_IDXSIZE, | |
73 | FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), | |
74 | TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), | |
75 | ||
76 | /* | |
77 | * Max number of TX descriptors we clean up at a time. Should be | |
78 | * modest as freeing skbs isn't cheap and it happens while holding | |
79 | * locks. We just need to free packets faster than they arrive; we |
80 | * eventually catch up and keep the amortized cost reasonable. | |
81 | */ | |
82 | MAX_TX_RECLAIM = 16, | |
83 | ||
84 | /* | |
85 | * Max number of Rx buffers we replenish at a time. Again keep this | |
86 | * modest, allocating buffers isn't cheap either. | |
87 | */ | |
88 | MAX_RX_REFILL = 16, | |
89 | ||
90 | /* | |
91 | * Period of the Rx queue check timer. This timer is infrequent as it | |
92 | * has something to do only when the system experiences severe memory | |
93 | * shortage. | |
94 | */ | |
95 | RX_QCHECK_PERIOD = (HZ / 2), | |
96 | ||
97 | /* | |
98 | * Period of the TX queue check timer and the maximum number of TX | |
99 | * descriptors to be reclaimed by the TX timer. | |
100 | */ | |
101 | TX_QCHECK_PERIOD = (HZ / 2), | |
102 | MAX_TIMER_TX_RECLAIM = 100, | |
103 | ||
104 | /* | |
105 | * An FL with <= FL_STARVE_THRES buffers is starving and a periodic | |
106 | * timer will attempt to refill it. | |
107 | */ | |
108 | FL_STARVE_THRES = 4, | |
109 | ||
110 | /* | |
111 | * Suspend an Ethernet TX queue with fewer available descriptors than | |
112 | * this. We always want to have room for a maximum sized packet: | |
113 | * inline immediate data + MAX_SKB_FRAGS. This is the same as | |
114 | * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS | |
115 | * (see that function and its helpers for a description of the | |
116 | * calculation; a worked example follows this enum). |
117 | */ | |
118 | ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1, | |
119 | ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 + | |
120 | ((ETHTXQ_MAX_FRAGS-1) & 1) + | |
121 | 2), | |
122 | ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) + | |
123 | sizeof(struct cpl_tx_pkt_lso_core) + | |
124 | sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64), | |
125 | ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR, | |
126 | ||
127 | ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT), | |
128 | ||
129 | /* | |
130 | * Max TX descriptor space we allow for an Ethernet packet to be | |
131 | * inlined into a WR. This is limited by the maximum value which | |
132 | * we can specify for immediate data in the firmware Ethernet TX | |
133 | * Work Request. | |
134 | */ | |
135 | MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK, | |
136 | ||
137 | /* | |
138 | * Max size of a WR sent through a control TX queue. | |
139 | */ | |
140 | MAX_CTRL_WR_LEN = 256, | |
141 | ||
142 | /* | |
143 | * Maximum amount of data which we'll ever need to inline into a | |
144 | * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN). | |
145 | */ | |
146 | MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN | |
147 | ? MAX_IMM_TX_PKT_LEN | |
148 | : MAX_CTRL_WR_LEN), | |
149 | ||
150 | /* | |
151 | * For incoming packets less than RX_COPY_THRES, we copy the data into | |
152 | * an skb rather than referencing the data. We allocate enough | |
153 | * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes | |
154 | * of the data (header). | |
155 | */ | |
156 | RX_COPY_THRES = 256, | |
157 | RX_PULL_LEN = 128, | |
158 | |
159 | /* |
160 | * Main body length for sk_buffs used for RX Ethernet packets with | |
161 | * fragments. Should be >= RX_PULL_LEN but possibly bigger to give | |
162 | * pskb_may_pull() some room. | |
163 | */ | |
164 | RX_SKB_LEN = 512, | |
165 | }; | |
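/*
 * For illustration, assuming MAX_SKB_FRAGS == 17 (a common value with
 * 4KB pages; it is not defined in this file): ETHTXQ_MAX_FRAGS is 18,
 * so ETHTXQ_MAX_SGL_LEN = (3 * 17)/2 + (17 & 1) + 2 = 25 + 1 + 2 = 28
 * flits, which matches the sgl_len() calculation below for an SGL with
 * 18 entries.
 */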
166 | |
167 | /* | |
168 | * Software state per TX descriptor. | |
169 | */ | |
170 | struct tx_sw_desc { | |
171 | struct sk_buff *skb; /* socket buffer of TX data source */ | |
172 | struct ulptx_sgl *sgl; /* scatter/gather list in TX Queue */ | |
173 | }; | |
174 | ||
175 | /* | |
176 | * Software state per RX Free List descriptor. We keep track of the allocated | |
177 | * FL page, its size, and its PCI DMA address (if the page is mapped). The FL | |
178 | * page size and its PCI DMA mapped state are stored in the low bits of the | |
179 | * PCI DMA address as per below. | |
180 | */ | |
181 | struct rx_sw_desc { | |
182 | struct page *page; /* Free List page buffer */ | |
183 | dma_addr_t dma_addr; /* PCI DMA address (if mapped) */ | |
184 | /* and flags (see below) */ | |
185 | }; | |
186 | ||
187 | /* | |
188 | * The low bits of rx_sw_desc.dma_addr have special meaning. Note that the | |
189 | * SGE also uses the low 4 bits to determine the size of the buffer. It uses | |
190 | * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array. | |
191 | * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4 | |
192 | * bits can only contain a 0 or a 1 to indicate which size buffer we're giving | |
193 | * to the SGE. Thus, our software state of "is the buffer mapped for DMA" is | |
194 | * maintained in an inverse sense so the hardware never sees that bit high. | |
195 | */ | |
196 | enum { | |
197 | RX_LARGE_BUF = 1 << 0, /* buffer is SGE_FL_BUFFER_SIZE[1] */ | |
198 | RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */ | |
199 | }; | |
200 | ||
201 | /** | |
202 | * get_buf_addr - return DMA buffer address of software descriptor | |
203 | * @sdesc: pointer to the software buffer descriptor | |
204 | * | |
205 | * Return the DMA buffer address of a software descriptor (stripping out | |
206 | * our low-order flag bits). | |
207 | */ | |
208 | static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc) | |
209 | { | |
210 | return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); | |
211 | } | |
212 | ||
213 | /** | |
214 | * is_buf_mapped - is buffer mapped for DMA? | |
215 | * @sdesc: pointer to the software buffer descriptor | |
216 | * | |
217 | * Determine whether the buffer associated with a software descriptor is |
218 | * mapped for DMA or not. | |
219 | */ | |
220 | static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc) | |
221 | { | |
222 | return !(sdesc->dma_addr & RX_UNMAPPED_BUF); | |
223 | } | |
224 | ||
225 | /** | |
226 | * need_skb_unmap - does the platform need unmapping of sk_buffs? | |
227 | * | |
228 | * Returns true if the platform needs sk_buff unmapping. The compiler |
229 | * optimizes away the unmapping code when this returns a constant false. |
230 | */ |
231 | static inline int need_skb_unmap(void) | |
232 | { | |
233 | #ifdef CONFIG_NEED_DMA_MAP_STATE |
234 | return 1; | |
235 | #else | |
236 | return 0; | |
237 | #endif | |
238 | } |
239 | ||
240 | /** | |
241 | * txq_avail - return the number of available slots in a TX queue | |
242 | * @tq: the TX queue | |
243 | * | |
244 | * Returns the number of available descriptors in a TX queue. | |
245 | */ | |
246 | static inline unsigned int txq_avail(const struct sge_txq *tq) | |
247 | { | |
248 | return tq->size - 1 - tq->in_use; | |
249 | } | |
250 | ||
251 | /** | |
252 | * fl_cap - return the capacity of a Free List | |
253 | * @fl: the Free List | |
254 | * | |
255 | * Returns the capacity of a Free List. The capacity is less than the | |
256 | * size because an Egress Queue Index Unit worth of descriptors needs to | |
257 | * be left unpopulated, otherwise the Producer and Consumer indices PIDX | |
258 | * and CIDX will match and the hardware will think the FL is empty. | |
259 | */ | |
260 | static inline unsigned int fl_cap(const struct sge_fl *fl) | |
261 | { | |
262 | return fl->size - FL_PER_EQ_UNIT; | |
263 | } | |
264 | ||
265 | /** | |
266 | * fl_starving - return whether a Free List is starving. | |
267 | * @fl: the Free List | |
268 | * | |
269 | * Tests specified Free List to see whether the number of buffers | |
270 | * available to the hardware has fallen below our "starvation" |
271 | * threshold. |
272 | */ |
273 | static inline bool fl_starving(const struct sge_fl *fl) | |
274 | { | |
275 | return fl->avail - fl->pend_cred <= FL_STARVE_THRES; | |
276 | } | |
277 | ||
278 | /** | |
279 | * map_skb - map an skb for DMA to the device | |
280 | * @dev: the egress net device | |
281 | * @skb: the packet to map | |
282 | * @addr: a pointer to the base of the DMA mapping array | |
283 | * | |
284 | * Map an skb for DMA to the device and return an array of DMA addresses. | |
285 | */ | |
286 | static int map_skb(struct device *dev, const struct sk_buff *skb, | |
287 | dma_addr_t *addr) | |
288 | { | |
289 | const skb_frag_t *fp, *end; | |
290 | const struct skb_shared_info *si; | |
291 | ||
292 | *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); | |
293 | if (dma_mapping_error(dev, *addr)) | |
294 | goto out_err; | |
295 | ||
296 | si = skb_shinfo(skb); | |
297 | end = &si->frags[si->nr_frags]; | |
298 | for (fp = si->frags; fp < end; fp++) { | |
299 | *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp), |
300 | DMA_TO_DEVICE); | |
301 | if (dma_mapping_error(dev, *addr)) |
302 | goto unwind; | |
303 | } | |
304 | return 0; | |
305 | ||
306 | unwind: | |
307 | while (fp-- > si->frags) | |
308 | dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); |
309 | dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); |
310 | ||
311 | out_err: | |
312 | return -ENOMEM; | |
313 | } | |
314 | ||
315 | static void unmap_sgl(struct device *dev, const struct sk_buff *skb, | |
316 | const struct ulptx_sgl *sgl, const struct sge_txq *tq) | |
317 | { | |
318 | const struct ulptx_sge_pair *p; | |
319 | unsigned int nfrags = skb_shinfo(skb)->nr_frags; | |
320 | ||
321 | if (likely(skb_headlen(skb))) | |
322 | dma_unmap_single(dev, be64_to_cpu(sgl->addr0), | |
323 | be32_to_cpu(sgl->len0), DMA_TO_DEVICE); | |
324 | else { | |
325 | dma_unmap_page(dev, be64_to_cpu(sgl->addr0), | |
326 | be32_to_cpu(sgl->len0), DMA_TO_DEVICE); | |
327 | nfrags--; | |
328 | } | |
329 | ||
330 | /* | |
331 | * the complexity below is because of the possibility of a wrap-around | |
332 | * in the middle of an SGL | |
333 | */ | |
334 | for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { | |
335 | if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) { | |
336 | unmap: | |
337 | dma_unmap_page(dev, be64_to_cpu(p->addr[0]), | |
338 | be32_to_cpu(p->len[0]), DMA_TO_DEVICE); | |
339 | dma_unmap_page(dev, be64_to_cpu(p->addr[1]), | |
340 | be32_to_cpu(p->len[1]), DMA_TO_DEVICE); | |
341 | p++; | |
342 | } else if ((u8 *)p == (u8 *)tq->stat) { | |
343 | p = (const struct ulptx_sge_pair *)tq->desc; | |
344 | goto unmap; | |
345 | } else if ((u8 *)p + 8 == (u8 *)tq->stat) { | |
346 | const __be64 *addr = (const __be64 *)tq->desc; | |
347 | ||
348 | dma_unmap_page(dev, be64_to_cpu(addr[0]), | |
349 | be32_to_cpu(p->len[0]), DMA_TO_DEVICE); | |
350 | dma_unmap_page(dev, be64_to_cpu(addr[1]), | |
351 | be32_to_cpu(p->len[1]), DMA_TO_DEVICE); | |
352 | p = (const struct ulptx_sge_pair *)&addr[2]; | |
353 | } else { | |
354 | const __be64 *addr = (const __be64 *)tq->desc; | |
355 | ||
356 | dma_unmap_page(dev, be64_to_cpu(p->addr[0]), | |
357 | be32_to_cpu(p->len[0]), DMA_TO_DEVICE); | |
358 | dma_unmap_page(dev, be64_to_cpu(addr[0]), | |
359 | be32_to_cpu(p->len[1]), DMA_TO_DEVICE); | |
360 | p = (const struct ulptx_sge_pair *)&addr[1]; | |
361 | } | |
362 | } | |
363 | if (nfrags) { | |
364 | __be64 addr; | |
365 | ||
366 | if ((u8 *)p == (u8 *)tq->stat) | |
367 | p = (const struct ulptx_sge_pair *)tq->desc; | |
368 | addr = ((u8 *)p + 16 <= (u8 *)tq->stat | |
369 | ? p->addr[0] | |
370 | : *(const __be64 *)tq->desc); | |
371 | dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]), | |
372 | DMA_TO_DEVICE); | |
373 | } | |
374 | } | |
375 | ||
376 | /** | |
377 | * free_tx_desc - reclaims TX descriptors and their buffers | |
378 | * @adapter: the adapter | |
379 | * @tq: the TX queue to reclaim descriptors from | |
380 | * @n: the number of descriptors to reclaim | |
381 | * @unmap: whether the buffers should be unmapped for DMA | |
382 | * | |
383 | * Reclaims TX descriptors from an SGE TX queue and frees the associated | |
384 | * TX buffers. Called with the TX queue lock held. | |
385 | */ | |
386 | static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq, | |
387 | unsigned int n, bool unmap) | |
388 | { | |
389 | struct tx_sw_desc *sdesc; | |
390 | unsigned int cidx = tq->cidx; | |
391 | struct device *dev = adapter->pdev_dev; | |
392 | ||
393 | const int need_unmap = need_skb_unmap() && unmap; | |
394 | ||
395 | sdesc = &tq->sdesc[cidx]; | |
396 | while (n--) { | |
397 | /* | |
398 | * If we kept a reference to the original TX skb, we need to | |
399 | * unmap it from PCI DMA space (if required) and free it. | |
400 | */ | |
401 | if (sdesc->skb) { | |
402 | if (need_unmap) | |
403 | unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq); | |
404 | kfree_skb(sdesc->skb); | |
405 | sdesc->skb = NULL; | |
406 | } | |
407 | ||
408 | sdesc++; | |
409 | if (++cidx == tq->size) { | |
410 | cidx = 0; | |
411 | sdesc = tq->sdesc; | |
412 | } | |
413 | } | |
414 | tq->cidx = cidx; | |
415 | } | |
416 | ||
417 | /* | |
418 | * Return the number of reclaimable descriptors in a TX queue. | |
419 | */ | |
420 | static inline int reclaimable(const struct sge_txq *tq) | |
421 | { | |
422 | int hw_cidx = be16_to_cpu(tq->stat->cidx); | |
423 | int reclaimable = hw_cidx - tq->cidx; | |
424 | if (reclaimable < 0) | |
425 | reclaimable += tq->size; | |
426 | return reclaimable; | |
427 | } | |
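/*
 * A worked example of the wrap-around handling above, with illustrative
 * numbers only: for tq->size = 1024, a hardware CIDX of 5 and a software
 * CIDX of 1020, hw_cidx - tq->cidx = -1015; adding tq->size gives 9
 * descriptors available for reclamation.
 */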
428 | ||
429 | /** | |
430 | * reclaim_completed_tx - reclaims completed TX descriptors | |
431 | * @adapter: the adapter | |
432 | * @tq: the TX queue to reclaim completed descriptors from | |
433 | * @unmap: whether the buffers should be unmapped for DMA | |
434 | * | |
435 | * Reclaims TX descriptors that the SGE has indicated it has processed, | |
436 | * and frees the associated buffers if possible. Called with the TX | |
437 | * queue locked. | |
438 | */ | |
439 | static inline void reclaim_completed_tx(struct adapter *adapter, | |
440 | struct sge_txq *tq, | |
441 | bool unmap) | |
442 | { | |
443 | int avail = reclaimable(tq); | |
444 | ||
445 | if (avail) { | |
446 | /* | |
447 | * Limit the amount of clean up work we do at a time to keep | |
448 | * the TX lock hold time O(1). | |
449 | */ | |
450 | if (avail > MAX_TX_RECLAIM) | |
451 | avail = MAX_TX_RECLAIM; | |
452 | ||
453 | free_tx_desc(adapter, tq, avail, unmap); | |
454 | tq->in_use -= avail; | |
455 | } | |
456 | } | |
457 | ||
458 | /** | |
459 | * get_buf_size - return the size of an RX Free List buffer. | |
460 | * @sdesc: pointer to the software buffer descriptor | |
461 | */ | |
462 | static inline int get_buf_size(const struct rx_sw_desc *sdesc) | |
463 | { | |
464 | return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF) | |
465 | ? (PAGE_SIZE << FL_PG_ORDER) | |
466 | : PAGE_SIZE; | |
467 | } | |
468 | ||
469 | /** | |
470 | * free_rx_bufs - free RX buffers on an SGE Free List | |
471 | * @adapter: the adapter | |
472 | * @fl: the SGE Free List to free buffers from | |
473 | * @n: how many buffers to free | |
474 | * | |
475 | * Release the next @n buffers on an SGE Free List RX queue. The | |
476 | * buffers must be made inaccessible to hardware before calling this | |
477 | * function. | |
478 | */ | |
479 | static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n) | |
480 | { | |
481 | while (n--) { | |
482 | struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx]; | |
483 | ||
484 | if (is_buf_mapped(sdesc)) | |
485 | dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), | |
486 | get_buf_size(sdesc), PCI_DMA_FROMDEVICE); | |
487 | put_page(sdesc->page); | |
488 | sdesc->page = NULL; | |
489 | if (++fl->cidx == fl->size) | |
490 | fl->cidx = 0; | |
491 | fl->avail--; | |
492 | } | |
493 | } | |
494 | ||
495 | /** | |
496 | * unmap_rx_buf - unmap the current RX buffer on an SGE Free List | |
497 | * @adapter: the adapter | |
498 | * @fl: the SGE Free List | |
499 | * | |
500 | * Unmap the current buffer on an SGE Free List RX queue. The | |
501 | * buffer must be made inaccessible to HW before calling this function. | |
502 | * | |
503 | * This is similar to @free_rx_bufs above but does not free the buffer. | |
504 | * Do note that the FL still loses any further access to the buffer. | |
505 | * This is used predominantly to "transfer ownership" of an FL buffer | |
506 | * to another entity (typically an skb's fragment list). | |
507 | */ | |
508 | static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) | |
509 | { | |
510 | struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx]; | |
511 | ||
512 | if (is_buf_mapped(sdesc)) | |
513 | dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), | |
514 | get_buf_size(sdesc), PCI_DMA_FROMDEVICE); | |
515 | sdesc->page = NULL; | |
516 | if (++fl->cidx == fl->size) | |
517 | fl->cidx = 0; | |
518 | fl->avail--; | |
519 | } | |
520 | ||
521 | /** | |
522 | * ring_fl_db - ring doorbell on free list |
523 | * @adapter: the adapter | |
524 | * @fl: the Free List whose doorbell should be rung ... | |
525 | * | |
526 | * Tell the Scatter Gather Engine that there are new free list entries | |
527 | * available. | |
528 | */ | |
529 | static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) | |
530 | { | |
531 | u32 val; |
532 | ||
533 | /* |
534 | * The SGE keeps track of its Producer and Consumer Indices in terms | |
535 | * of Egress Queue Units so we can only tell it about integral numbers | |
536 | * of multiples of Free List Entries per Egress Queue Units ... | |
537 | */ | |
538 | if (fl->pend_cred >= FL_PER_EQ_UNIT) { | |
539 | val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); |
540 | if (!is_t4(adapter->params.chip)) |
541 | val |= DBTYPE(1); |
542 | wmb(); |
543 | t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, | |
544 | DBPRIO(1) | |
545 | QID(fl->cntxt_id) | val); |
546 | fl->pend_cred %= FL_PER_EQ_UNIT; |
547 | } | |
548 | } | |
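/*
 * For example (illustrative values, and assuming EQ_UNIT is 64 bytes so
 * FL_PER_EQ_UNIT is 8): with fl->pend_cred = 37 the doorbell write
 * advertises PIDX(37 / 8) = PIDX(4), i.e. 32 new Free List buffers, and
 * the remaining 37 % 8 = 5 credits stay pending for a later doorbell.
 */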
549 | ||
550 | /** | |
551 | * set_rx_sw_desc - initialize software RX buffer descriptor | |
552 | * @sdesc: pointer to the software RX buffer descriptor |
553 | * @page: pointer to the page data structure backing the RX buffer | |
554 | * @dma_addr: PCI DMA address (possibly with low-bit flags) | |
555 | */ | |
556 | static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page, | |
557 | dma_addr_t dma_addr) | |
558 | { | |
559 | sdesc->page = page; | |
560 | sdesc->dma_addr = dma_addr; | |
561 | } | |
562 | ||
563 | /* | |
564 | * Support for poisoning RX buffers ... | |
565 | */ | |
566 | #define POISON_BUF_VAL -1 | |
567 | ||
568 | static inline void poison_buf(struct page *page, size_t sz) | |
569 | { | |
570 | #if POISON_BUF_VAL >= 0 | |
571 | memset(page_address(page), POISON_BUF_VAL, sz); | |
572 | #endif | |
573 | } | |
574 | ||
575 | /** | |
576 | * refill_fl - refill an SGE RX buffer ring | |
577 | * @adapter: the adapter | |
578 | * @fl: the Free List ring to refill | |
579 | * @n: the number of new buffers to allocate | |
580 | * @gfp: the gfp flags for the allocations | |
581 | * | |
582 | * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, | |
583 | * allocated with the supplied gfp flags. The caller must assure that | |
584 | * @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN | |
585 | * EGRESS QUEUE UNITS_ indicates an empty Free List! Returns the number | |
586 | * of buffers allocated. If afterwards the queue is found critically low, | |
587 | * mark it as starving in the bitmap of starving FLs. | |
588 | */ | |
589 | static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, | |
590 | int n, gfp_t gfp) | |
591 | { | |
592 | struct page *page; | |
593 | dma_addr_t dma_addr; | |
594 | unsigned int cred = fl->avail; | |
595 | __be64 *d = &fl->desc[fl->pidx]; | |
596 | struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx]; | |
597 | ||
598 | /* | |
599 | * Sanity: ensure that the result of adding n Free List buffers | |
600 | * won't result in wrapping the SGE's Producer Index around to | |
601 | * its Consumer Index thereby indicating an empty Free List ... |
602 | */ | |
603 | BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT); | |
604 | ||
605 | /* | |
606 | * If we support large pages, prefer large buffers and fail over to | |
607 | * small pages if we can't allocate large pages to satisfy the refill. | |
608 | * If we don't support large pages, drop directly into the small page | |
609 | * allocation code. | |
610 | */ | |
611 | if (FL_PG_ORDER == 0) | |
612 | goto alloc_small_pages; | |
613 | ||
614 | while (n) { | |
615 | page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, | |
616 | FL_PG_ORDER); | |
617 | if (unlikely(!page)) { | |
618 | /* | |
619 | * We've failed in our attempt to allocate a "large |
620 | * page". Fail over to the "small page" allocation | |
621 | * below. | |
622 | */ | |
623 | fl->large_alloc_failed++; | |
624 | break; | |
625 | } | |
626 | poison_buf(page, PAGE_SIZE << FL_PG_ORDER); | |
627 | ||
628 | dma_addr = dma_map_page(adapter->pdev_dev, page, 0, | |
629 | PAGE_SIZE << FL_PG_ORDER, | |
630 | PCI_DMA_FROMDEVICE); | |
631 | if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { | |
632 | /* | |
633 | * We've run out of DMA mapping space. Free up the | |
634 | * buffer and return with what we've managed to put | |
635 | * into the free list. We don't want to fail over to | |
636 | * the small page allocation below in this case | |
637 | * because DMA mapping resources are typically | |
638 | * critical resources once they become scarce. |
639 | */ | |
640 | __free_pages(page, FL_PG_ORDER); | |
641 | goto out; | |
642 | } | |
643 | dma_addr |= RX_LARGE_BUF; | |
644 | *d++ = cpu_to_be64(dma_addr); | |
645 | ||
646 | set_rx_sw_desc(sdesc, page, dma_addr); | |
647 | sdesc++; | |
648 | ||
649 | fl->avail++; | |
650 | if (++fl->pidx == fl->size) { | |
651 | fl->pidx = 0; | |
652 | sdesc = fl->sdesc; | |
653 | d = fl->desc; | |
654 | } | |
655 | n--; | |
656 | } | |
657 | ||
658 | alloc_small_pages: | |
659 | while (n--) { | |
660 | page = __skb_alloc_page(gfp | __GFP_NOWARN, NULL); |
661 | if (unlikely(!page)) { |
662 | fl->alloc_failed++; | |
663 | break; | |
664 | } | |
665 | poison_buf(page, PAGE_SIZE); | |
666 | ||
667 | dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE, | |
668 | PCI_DMA_FROMDEVICE); | |
669 | if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { | |
670 | put_page(page); |
671 | break; |
672 | } | |
673 | *d++ = cpu_to_be64(dma_addr); | |
674 | ||
675 | set_rx_sw_desc(sdesc, page, dma_addr); | |
676 | sdesc++; | |
677 | ||
678 | fl->avail++; | |
679 | if (++fl->pidx == fl->size) { | |
680 | fl->pidx = 0; | |
681 | sdesc = fl->sdesc; | |
682 | d = fl->desc; | |
683 | } | |
684 | } | |
685 | ||
686 | out: | |
687 | /* | |
688 | * Update our accounting state to incorporate the new Free List | |
689 | * buffers, tell the hardware about them and return the number of | |
690 | * buffers which we were able to allocate. |
691 | */ |
692 | cred = fl->avail - cred; | |
693 | fl->pend_cred += cred; | |
694 | ring_fl_db(adapter, fl); | |
695 | ||
696 | if (unlikely(fl_starving(fl))) { | |
697 | smp_wmb(); | |
698 | set_bit(fl->cntxt_id, adapter->sge.starving_fl); | |
699 | } | |
700 | ||
701 | return cred; | |
702 | } | |
703 | ||
704 | /* | |
705 | * Refill a Free List to its capacity or the Maximum Refill Increment, | |
706 | * whichever is smaller ... | |
707 | */ | |
708 | static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl) | |
709 | { | |
710 | refill_fl(adapter, fl, | |
711 | min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail), | |
712 | GFP_ATOMIC); | |
713 | } | |
714 | ||
715 | /** | |
716 | * alloc_ring - allocate resources for an SGE descriptor ring | |
717 | * @dev: the PCI device's core device | |
718 | * @nelem: the number of descriptors | |
719 | * @hwsize: the size of each hardware descriptor | |
720 | * @swsize: the size of each software descriptor | |
721 | * @busaddrp: the physical PCI bus address of the allocated ring | |
722 | * @swringp: return address pointer for software ring | |
723 | * @stat_size: extra space in hardware ring for status information | |
724 | * | |
725 | * Allocates resources for an SGE descriptor ring, such as TX queues, | |
726 | * free buffer lists, response queues, etc. Each SGE ring requires | |
727 | * space for its hardware descriptors plus, optionally, space for software | |
728 | * state associated with each hardware entry (the metadata). The function | |
729 | * returns three values: the virtual address for the hardware ring (the | |
730 | * return value of the function), the PCI bus address of the hardware | |
731 | * ring (in *busaddrp), and the address of the software ring (in swringp). | |
732 | * Both the hardware and software rings are returned zeroed out. | |
733 | */ | |
734 | static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize, | |
735 | size_t swsize, dma_addr_t *busaddrp, void *swringp, | |
736 | size_t stat_size) | |
737 | { | |
738 | /* | |
739 | * Allocate the hardware ring and PCI DMA bus address space for said. | |
740 | */ | |
741 | size_t hwlen = nelem * hwsize + stat_size; | |
742 | void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); | |
743 | ||
744 | if (!hwring) | |
745 | return NULL; | |
746 | ||
747 | /* | |
748 | * If the caller wants a software ring, allocate it and return a | |
749 | * pointer to it in *swringp. | |
750 | */ | |
751 | BUG_ON((swsize != 0) != (swringp != NULL)); | |
752 | if (swsize) { | |
753 | void *swring = kcalloc(nelem, swsize, GFP_KERNEL); | |
754 | ||
755 | if (!swring) { | |
756 | dma_free_coherent(dev, hwlen, hwring, *busaddrp); | |
757 | return NULL; | |
758 | } | |
759 | *(void **)swringp = swring; | |
760 | } | |
761 | ||
762 | /* | |
763 | * Zero out the hardware ring and return its address as our function | |
764 | * value. | |
765 | */ | |
766 | memset(hwring, 0, hwlen); | |
767 | return hwring; | |
768 | } | |
769 | ||
770 | /** | |
771 | * sgl_len - calculates the size of an SGL of the given capacity | |
772 | * @n: the number of SGL entries | |
773 | * | |
774 | * Calculates the number of flits (8-byte units) needed for a Direct | |
775 | * Scatter/Gather List that can hold the given number of entries. | |
776 | */ | |
777 | static inline unsigned int sgl_len(unsigned int n) | |
778 | { | |
779 | /* | |
780 | * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA | |
781 | * addresses. The DSGL Work Request starts off with a 32-bit DSGL | |
782 | * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N, | |
783 | * repeated sequences of { Length[i], Length[i+1], Address[i], | |
784 | * Address[i+1] } (this ensures that all addresses are on 64-bit | |
785 | * boundaries). If N is even, then Length[N+1] should be set to 0 and | |
786 | * Address[N+1] is omitted. | |
787 | * | |
788 | * The following calculation incorporates all of the above. It's | |
789 | * somewhat hard to follow but, briefly: the "+2" accounts for the | |
790 | * first two flits which include the DSGL header, Length0 and | |
791 | * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 | |
792 | * flits for every pair of the remaining N) +1 if (n-1) is odd; and | |
793 | * finally the "+((n-1)&1)" adds the one remaining flit needed if | |
794 | * (n-1) is odd ... | |
795 | */ | |
796 | n--; | |
797 | return (3 * n) / 2 + (n & 1) + 2; | |
798 | } | |
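/*
 * Worked example: an SGL with n = 3 entries needs two flits for the DSGL
 * header, Length0 and Address0, one flit for Length1/Length2 and one flit
 * for each of Address1 and Address2, i.e. (3 * 2)/2 + (2 & 1) + 2 =
 * 3 + 0 + 2 = 5 flits.
 */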
799 | ||
800 | /** | |
801 | * flits_to_desc - returns the num of TX descriptors for the given flits | |
802 | * @flits: the number of flits | |
803 | * | |
804 | * Returns the number of TX descriptors needed for the supplied number | |
805 | * of flits. | |
806 | */ | |
807 | static inline unsigned int flits_to_desc(unsigned int flits) | |
808 | { | |
809 | BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64)); | |
810 | return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT); | |
811 | } | |
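/*
 * For instance, assuming EQ_UNIT is 64 bytes, TXD_PER_EQ_UNIT is 8 flits
 * per descriptor, so the 5-flit SGL from the sgl_len() example above fits
 * in DIV_ROUND_UP(5, 8) = 1 TX descriptor (a real Work Request also adds
 * the WR and CPL header flits counted by calc_tx_flits() below).
 */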
812 | ||
813 | /** | |
814 | * is_eth_imm - can an Ethernet packet be sent as immediate data? | |
815 | * @skb: the packet | |
816 | * | |
817 | * Returns whether an Ethernet packet is small enough to fit completely as | |
818 | * immediate data. | |
819 | */ | |
820 | static inline int is_eth_imm(const struct sk_buff *skb) | |
821 | { | |
822 | /* | |
823 | * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request | |
824 | * which does not accommodate immediate data. We could dike out all | |
825 | * of the support code for immediate data but that would tie our hands | |
826 | * too much if we ever want to enhance the firmware. It would also |
827 | * create more differences between the PF and VF Drivers. | |
828 | */ | |
829 | return false; | |
830 | } | |
831 | ||
832 | /** | |
833 | * calc_tx_flits - calculate the number of flits for a packet TX WR | |
834 | * @skb: the packet | |
835 | * | |
836 | * Returns the number of flits needed for a TX Work Request for the | |
837 | * given Ethernet packet, including the needed WR and CPL headers. | |
838 | */ | |
839 | static inline unsigned int calc_tx_flits(const struct sk_buff *skb) | |
840 | { | |
841 | unsigned int flits; | |
842 | ||
843 | /* | |
844 | * If the skb is small enough, we can pump it out as a work request | |
845 | * with only immediate data. In that case we just have to have the | |
846 | * TX Packet header plus the skb data in the Work Request. | |
847 | */ | |
848 | if (is_eth_imm(skb)) | |
849 | return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), | |
850 | sizeof(__be64)); | |
851 | ||
852 | /* | |
853 | * Otherwise, we're going to have to construct a Scatter gather list | |
854 | * of the skb body and fragments. We also include the flits necessary | |
855 | * for the TX Packet Work Request and CPL. We always have a firmware | |
856 | * Write Header (incorporated as part of the cpl_tx_pkt_lso and | |
857 | * cpl_tx_pkt structures), followed by either a TX Packet Write CPL | |
858 | * message or, if we're doing a Large Send Offload, an LSO CPL message | |
859 | * with an embedded TX Packet Write CPL message. |
860 | */ | |
861 | flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); | |
862 | if (skb_shinfo(skb)->gso_size) | |
863 | flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + | |
864 | sizeof(struct cpl_tx_pkt_lso_core) + | |
865 | sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); | |
866 | else | |
867 | flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + | |
868 | sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); | |
869 | return flits; | |
870 | } | |
871 | ||
872 | /** | |
873 | * write_sgl - populate a Scatter/Gather List for a packet | |
874 | * @skb: the packet | |
875 | * @tq: the TX queue we are writing into | |
876 | * @sgl: starting location for writing the SGL | |
877 | * @end: points right after the end of the SGL | |
878 | * @start: start offset into skb main-body data to include in the SGL | |
879 | * @addr: the list of DMA bus addresses for the SGL elements | |
880 | * | |
881 | * Generates a Scatter/Gather List for the buffers that make up a packet. | |
882 | * The caller must provide adequate space for the SGL that will be written. | |
883 | * The SGL includes all of the packet's page fragments and the data in its | |
884 | * main body except for the first @start bytes. @sgl must be 16-byte |
885 | * aligned and within a TX descriptor with available space. @end points |
886 | * right after the end of the SGL but does not account for any potential |
887 | * wrap around, i.e., @end > @tq->stat. | |
888 | */ | |
889 | static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq, | |
890 | struct ulptx_sgl *sgl, u64 *end, unsigned int start, | |
891 | const dma_addr_t *addr) | |
892 | { | |
893 | unsigned int i, len; | |
894 | struct ulptx_sge_pair *to; | |
895 | const struct skb_shared_info *si = skb_shinfo(skb); | |
896 | unsigned int nfrags = si->nr_frags; | |
897 | struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; | |
898 | ||
899 | len = skb_headlen(skb) - start; | |
900 | if (likely(len)) { | |
901 | sgl->len0 = htonl(len); | |
902 | sgl->addr0 = cpu_to_be64(addr[0] + start); | |
903 | nfrags++; | |
904 | } else { | |
905 | sgl->len0 = htonl(skb_frag_size(&si->frags[0])); |
906 | sgl->addr0 = cpu_to_be64(addr[1]); |
907 | } | |
908 | ||
909 | sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | | |
910 | ULPTX_NSGE(nfrags)); | |
911 | if (likely(--nfrags == 0)) | |
912 | return; | |
913 | /* | |
914 | * Most of the complexity below deals with the possibility we hit the | |
915 | * end of the queue in the middle of writing the SGL. For this case | |
916 | * only we create the SGL in a temporary buffer and then copy it. | |
917 | */ | |
918 | to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge; | |
919 | ||
920 | for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { | |
921 | to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); |
922 | to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); | |
923 | to->addr[0] = cpu_to_be64(addr[i]); |
924 | to->addr[1] = cpu_to_be64(addr[++i]); | |
925 | } | |
926 | if (nfrags) { | |
927 | to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); |
928 | to->len[1] = cpu_to_be32(0); |
929 | to->addr[0] = cpu_to_be64(addr[i + 1]); | |
930 | } | |
931 | if (unlikely((u8 *)end > (u8 *)tq->stat)) { | |
932 | unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1; | |
933 | ||
934 | if (likely(part0)) | |
935 | memcpy(sgl->sge, buf, part0); | |
936 | part1 = (u8 *)end - (u8 *)tq->stat; | |
937 | memcpy(tq->desc, (u8 *)buf + part0, part1); | |
938 | end = (void *)tq->desc + part1; | |
939 | } | |
940 | if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ | |
941 | *end = 0; |
942 | } |
943 | ||
944 | /** | |
945 | * ring_tx_db - check and potentially ring a TX queue's doorbell |
946 | * @adapter: the adapter | |
947 | * @tq: the TX queue | |
948 | * @n: number of new descriptors to give to HW | |
949 | * | |
950 | * Ring the doorbell for a TX queue. |
951 | */ | |
952 | static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, | |
953 | int n) | |
954 | { | |
955 | /* | |
956 | * Warn if we write doorbells with the wrong priority and write | |
957 | * descriptors before telling HW. | |
958 | */ | |
959 | WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO(1)); |
960 | wmb(); |
961 | t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, | |
962 | QID(tq->cntxt_id) | PIDX(n)); | |
963 | } | |
964 | ||
965 | /** | |
966 | * inline_tx_skb - inline a packet's data into TX descriptors | |
967 | * @skb: the packet | |
968 | * @tq: the TX queue where the packet will be inlined | |
969 | * @pos: starting position in the TX queue to inline the packet | |
970 | * | |
971 | * Inline a packet's contents directly into TX descriptors, starting at | |
972 | * the given position within the TX DMA ring. | |
973 | * Most of the complexity of this operation is dealing with wrap arounds | |
974 | * in the middle of the packet we want to inline. | |
975 | */ | |
976 | static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq, | |
977 | void *pos) | |
978 | { | |
979 | u64 *p; | |
980 | int left = (void *)tq->stat - pos; | |
981 | ||
982 | if (likely(skb->len <= left)) { | |
983 | if (likely(!skb->data_len)) | |
984 | skb_copy_from_linear_data(skb, pos, skb->len); | |
985 | else | |
986 | skb_copy_bits(skb, 0, pos, skb->len); | |
987 | pos += skb->len; | |
988 | } else { | |
989 | skb_copy_bits(skb, 0, pos, left); | |
990 | skb_copy_bits(skb, left, tq->desc, skb->len - left); | |
991 | pos = (void *)tq->desc + (skb->len - left); | |
992 | } | |
993 | ||
994 | /* 0-pad to multiple of 16 */ | |
995 | p = PTR_ALIGN(pos, 8); | |
996 | if ((uintptr_t)p & 8) | |
997 | *p = 0; | |
998 | } | |
999 | ||
1000 | /* | |
1001 | * Figure out what HW csum a packet wants and return the appropriate control | |
1002 | * bits. | |
1003 | */ | |
1004 | static u64 hwcsum(const struct sk_buff *skb) | |
1005 | { | |
1006 | int csum_type; | |
1007 | const struct iphdr *iph = ip_hdr(skb); | |
1008 | ||
1009 | if (iph->version == 4) { | |
1010 | if (iph->protocol == IPPROTO_TCP) | |
1011 | csum_type = TX_CSUM_TCPIP; | |
1012 | else if (iph->protocol == IPPROTO_UDP) | |
1013 | csum_type = TX_CSUM_UDPIP; | |
1014 | else { | |
1015 | nocsum: | |
1016 | /* | |
1017 | * unknown protocol, disable HW csum | |
1018 | * and hope a bad packet is detected | |
1019 | */ | |
1020 | return TXPKT_L4CSUM_DIS; | |
1021 | } | |
1022 | } else { | |
1023 | /* | |
1024 | * this doesn't work with extension headers | |
1025 | */ | |
1026 | const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; | |
1027 | ||
1028 | if (ip6h->nexthdr == IPPROTO_TCP) | |
1029 | csum_type = TX_CSUM_TCPIP6; | |
1030 | else if (ip6h->nexthdr == IPPROTO_UDP) | |
1031 | csum_type = TX_CSUM_UDPIP6; | |
1032 | else | |
1033 | goto nocsum; | |
1034 | } | |
1035 | ||
1036 | if (likely(csum_type >= TX_CSUM_TCPIP)) | |
1037 | return TXPKT_CSUM_TYPE(csum_type) | | |
1038 | TXPKT_IPHDR_LEN(skb_network_header_len(skb)) | | |
1039 | TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN); | |
1040 | else { | |
1041 | int start = skb_transport_offset(skb); | |
1042 | ||
1043 | return TXPKT_CSUM_TYPE(csum_type) | | |
1044 | TXPKT_CSUM_START(start) | | |
1045 | TXPKT_CSUM_LOC(start + skb->csum_offset); | |
1046 | } | |
1047 | } | |
1048 | ||
1049 | /* | |
1050 | * Stop an Ethernet TX queue and record that state change. | |
1051 | */ | |
1052 | static void txq_stop(struct sge_eth_txq *txq) | |
1053 | { | |
1054 | netif_tx_stop_queue(txq->txq); | |
1055 | txq->q.stops++; | |
1056 | } | |
1057 | ||
1058 | /* | |
1059 | * Advance our software state for a TX queue by adding n in use descriptors. | |
1060 | */ | |
1061 | static inline void txq_advance(struct sge_txq *tq, unsigned int n) | |
1062 | { | |
1063 | tq->in_use += n; | |
1064 | tq->pidx += n; | |
1065 | if (tq->pidx >= tq->size) | |
1066 | tq->pidx -= tq->size; | |
1067 | } | |
1068 | ||
1069 | /** | |
1070 | * t4vf_eth_xmit - add a packet to an Ethernet TX queue | |
1071 | * @skb: the packet | |
1072 | * @dev: the egress net device | |
1073 | * | |
1074 | * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled. | |
1075 | */ | |
1076 | int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |
1077 | { | |
1078 | u32 wr_mid; |
1079 | u64 cntrl, *end; |
1080 | int qidx, credits; | |
1081 | unsigned int flits, ndesc; | |
1082 | struct adapter *adapter; | |
1083 | struct sge_eth_txq *txq; | |
1084 | const struct port_info *pi; | |
1085 | struct fw_eth_tx_pkt_vm_wr *wr; | |
1086 | struct cpl_tx_pkt_core *cpl; | |
1087 | const struct skb_shared_info *ssi; | |
1088 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; | |
1089 | const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) + | |
1090 | sizeof(wr->ethmacsrc) + | |
1091 | sizeof(wr->ethtype) + | |
1092 | sizeof(wr->vlantci)); | |
1093 | ||
1094 | /* | |
1095 | * The chip minimum packet length is 10 octets but the firmware | |
1096 | * command that we are using requires that we copy the Ethernet header | |
1097 | * (including the VLAN tag) into the header so we reject anything | |
1098 | * smaller than that ... | |
1099 | */ | |
1100 | if (unlikely(skb->len < fw_hdr_copy_len)) | |
1101 | goto out_free; | |
1102 | ||
1103 | /* | |
1104 | * Figure out which TX Queue we're going to use. | |
1105 | */ | |
1106 | pi = netdev_priv(dev); | |
1107 | adapter = pi->adapter; | |
1108 | qidx = skb_get_queue_mapping(skb); | |
1109 | BUG_ON(qidx >= pi->nqsets); | |
1110 | txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; | |
1111 | ||
1112 | /* | |
1113 | * Take this opportunity to reclaim any TX Descriptors whose DMA | |
1114 | * transfers have completed. | |
1115 | */ | |
1116 | reclaim_completed_tx(adapter, &txq->q, true); | |
1117 | ||
1118 | /* | |
1119 | * Calculate the number of flits and TX Descriptors we're going to | |
1120 | * need along with how many TX Descriptors will be left over after | |
1121 | * we inject our Work Request. | |
1122 | */ | |
1123 | flits = calc_tx_flits(skb); | |
1124 | ndesc = flits_to_desc(flits); | |
1125 | credits = txq_avail(&txq->q) - ndesc; | |
1126 | ||
1127 | if (unlikely(credits < 0)) { | |
1128 | /* | |
1129 | * Not enough room for this packet's Work Request. Stop the | |
1130 | * TX Queue and return a "busy" condition. The queue will get | |
1131 | * started later on when the firmware informs us that space | |
1132 | * has opened up. | |
1133 | */ | |
1134 | txq_stop(txq); | |
1135 | dev_err(adapter->pdev_dev, | |
1136 | "%s: TX ring %u full while queue awake!\n", | |
1137 | dev->name, qidx); | |
1138 | return NETDEV_TX_BUSY; | |
1139 | } | |
1140 | ||
1141 | if (!is_eth_imm(skb) && | |
1142 | unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) { | |
1143 | /* | |
1144 | * We need to map the skb into PCI DMA space (because it can't | |
1145 | * be in-lined directly into the Work Request) and the mapping | |
1146 | * operation failed. Record the error and drop the packet. | |
1147 | */ | |
1148 | txq->mapping_err++; | |
1149 | goto out_free; | |
1150 | } | |
1151 | ||
1152 | wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); |
1153 | if (unlikely(credits < ETHTXQ_STOP_THRES)) { |
1154 | /* | |
1155 | * After we're done injecting the Work Request for this | |
1156 | * packet, we'll be below our "stop threshold" so stop the TX |
1157 | * Queue now and schedule a request for an SGE Egress Queue |
1158 | * Update message. The queue will get started later on when | |
1159 | * the firmware processes this Work Request and sends us an | |
1160 | * Egress Queue Status Update message indicating that space | |
1161 | * has opened up. | |
1162 | */ |
1163 | txq_stop(txq); | |
1164 | wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; |
1165 | } |
1166 | ||
1167 | /* | |
1168 | * Start filling in our Work Request. Note that we do _not_ handle | |
1169 | * the WR Header wrapping around the TX Descriptor Ring. If our | |
1170 | * maximum header size ever exceeds one TX Descriptor, we'll need to | |
1171 | * do something else here. | |
1172 | */ | |
1173 | BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); | |
1174 | wr = (void *)&txq->q.desc[txq->q.pidx]; | |
1175 | wr->equiq_to_len16 = cpu_to_be32(wr_mid); |
1176 | wr->r3[0] = cpu_to_be64(0); |
1177 | wr->r3[1] = cpu_to_be64(0); | |
1178 | skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); | |
1179 | end = (u64 *)wr + flits; | |
1180 | ||
1181 | /* | |
1182 | * If this is a Large Send Offload packet we'll put in an LSO CPL | |
1183 | * message with an encapsulated TX Packet CPL message. Otherwise we | |
1184 | * just use a TX Packet CPL message. | |
1185 | */ | |
1186 | ssi = skb_shinfo(skb); | |
1187 | if (ssi->gso_size) { | |
1188 | struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); | |
1189 | bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; | |
1190 | int l3hdr_len = skb_network_header_len(skb); | |
1191 | int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; | |
1192 | ||
1193 | wr->op_immdlen = | |
1194 | cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | | |
1195 | FW_WR_IMMDLEN(sizeof(*lso) + | |
1196 | sizeof(*cpl))); | |
1197 | /* | |
1198 | * Fill in the LSO CPL message. | |
1199 | */ | |
1200 | lso->lso_ctrl = | |
1201 | cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) | | |
1202 | LSO_FIRST_SLICE | | |
1203 | LSO_LAST_SLICE | | |
1204 | LSO_IPV6(v6) | | |
1205 | LSO_ETHHDR_LEN(eth_xtra_len/4) | | |
1206 | LSO_IPHDR_LEN(l3hdr_len/4) | | |
1207 | LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); | |
1208 | lso->ipid_ofst = cpu_to_be16(0); | |
1209 | lso->mss = cpu_to_be16(ssi->gso_size); | |
1210 | lso->seqno_offset = cpu_to_be32(0); | |
1211 | lso->len = cpu_to_be32(skb->len); | |
1212 | ||
1213 | /* | |
1214 | * Set up TX Packet CPL pointer, control word and perform | |
1215 | * accounting. | |
1216 | */ | |
1217 | cpl = (void *)(lso + 1); | |
1218 | cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | | |
1219 | TXPKT_IPHDR_LEN(l3hdr_len) | | |
1220 | TXPKT_ETHHDR_LEN(eth_xtra_len)); | |
1221 | txq->tso++; | |
1222 | txq->tx_cso += ssi->gso_segs; | |
1223 | } else { | |
1224 | int len; | |
1225 | ||
1226 | len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl); | |
1227 | wr->op_immdlen = | |
1228 | cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | | |
1229 | FW_WR_IMMDLEN(len)); | |
1230 | ||
1231 | /* | |
1232 | * Set up TX Packet CPL pointer, control word and perform | |
1233 | * accounting. | |
1234 | */ | |
1235 | cpl = (void *)(wr + 1); | |
1236 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
1237 | cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; | |
1238 | txq->tx_cso++; | |
1239 | } else | |
1240 | cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; | |
1241 | } | |
1242 | ||
1243 | /* | |
1244 | * If there's a VLAN tag present, add that to the list of things to | |
1245 | * do in this Work Request. | |
1246 | */ | |
1247 | if (vlan_tx_tag_present(skb)) { | |
1248 | txq->vlan_ins++; | |
1249 | cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb)); | |
1250 | } | |
1251 | ||
1252 | /* | |
1253 | * Fill in the TX Packet CPL message header. | |
1254 | */ | |
1255 | cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) | | |
1256 | TXPKT_INTF(pi->port_id) | | |
1257 | TXPKT_PF(0)); | |
1258 | cpl->pack = cpu_to_be16(0); | |
1259 | cpl->len = cpu_to_be16(skb->len); | |
1260 | cpl->ctrl1 = cpu_to_be64(cntrl); | |
1261 | ||
1262 | #ifdef T4_TRACE | |
1263 | T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7], | |
1264 | "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u", | |
1265 | ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags); | |
1266 | #endif | |
1267 | ||
1268 | /* | |
1269 | * Fill in the body of the TX Packet CPL message with either in-lined | |
1270 | * data or a Scatter/Gather List. | |
1271 | */ | |
1272 | if (is_eth_imm(skb)) { | |
1273 | /* | |
1274 | * In-line the packet's data and free the skb since we don't | |
1275 | * need it any longer. | |
1276 | */ | |
1277 | inline_tx_skb(skb, &txq->q, cpl + 1); | |
1278 | dev_kfree_skb(skb); | |
1279 | } else { | |
1280 | /* | |
1281 | * Write the skb's Scatter/Gather list into the TX Packet CPL | |
1282 | * message and retain a pointer to the skb so we can free it | |
1283 | * later when its DMA completes. (We store the skb pointer | |
1284 | * in the Software Descriptor corresponding to the last TX | |
1285 | * Descriptor used by the Work Request.) | |
1286 | * | |
1287 | * The retained skb will be freed when the corresponding TX | |
1288 | * Descriptors are reclaimed after their DMAs complete. | |
1289 | * However, this could take quite a while since, in general, | |
1290 | * the hardware is set up to be lazy about sending DMA | |
1291 | * completion notifications to us and we mostly perform TX | |
1292 | * reclaims in the transmit routine. | |
1293 | * | |
1294 | * This is good for performance but means that we rely on new |
1295 | * TX packets arriving to run the destructors of completed | |
1296 | * packets, which open up space in their sockets' send queues. | |
1297 | * Sometimes we do not get such new packets causing TX to | |
1298 | * stall. A single UDP transmitter is a good example of this | |
1299 | * situation. We have a clean up timer that periodically | |
1300 | * reclaims completed packets but it doesn't run often enough | |
1301 | * (nor do we want it to) to prevent lengthy stalls. A | |
1302 | * solution to this problem is to run the destructor early, | |
1303 | * after the packet is queued but before it's DMAd. A con is | |
1304 | * that we lie to socket memory accounting, but the amount of | |
1305 | * extra memory is reasonable (limited by the number of TX | |
1306 | * descriptors), the packets do actually get freed quickly by | |
1307 | * new packets almost always, and for protocols like TCP that | |
1308 | * wait for acks to really free up the data the extra memory | |
1309 | * is even less. On the positive side we run the destructors | |
1310 | * on the sending CPU rather than on a potentially different | |
1311 | * completing CPU, usually a good thing. |
1312 | * |
1313 | * Run the destructor before telling the DMA engine about the | |
1314 | * packet to make sure it doesn't complete and get freed | |
1315 | * prematurely. | |
1316 | */ | |
1317 | struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); | |
1318 | struct sge_txq *tq = &txq->q; | |
1319 | int last_desc; | |
1320 | ||
1321 | /* | |
1322 | * If the Work Request header was an exact multiple of our TX | |
1323 | * Descriptor length, then it's possible that the starting SGL | |
1324 | * pointer lines up exactly with the end of our TX Descriptor | |
1325 | * ring. If that's the case, wrap around to the beginning | |
1326 | * here ... | |
1327 | */ | |
1328 | if (unlikely((void *)sgl == (void *)tq->stat)) { | |
1329 | sgl = (void *)tq->desc; | |
1330 | end = ((void *)tq->desc + ((void *)end - (void *)tq->stat)); |
1331 | } |
1332 | ||
1333 | write_sgl(skb, tq, sgl, end, 0, addr); | |
1334 | skb_orphan(skb); | |
1335 | ||
1336 | last_desc = tq->pidx + ndesc - 1; | |
1337 | if (last_desc >= tq->size) | |
1338 | last_desc -= tq->size; | |
1339 | tq->sdesc[last_desc].skb = skb; | |
1340 | tq->sdesc[last_desc].sgl = sgl; | |
1341 | } | |
1342 | ||
1343 | /* | |
1344 | * Advance our internal TX Queue state, tell the hardware about | |
1345 | * the new TX descriptors and return success. | |
1346 | */ | |
1347 | txq_advance(&txq->q, ndesc); | |
1348 | dev->trans_start = jiffies; | |
1349 | ring_tx_db(adapter, &txq->q, ndesc); | |
1350 | return NETDEV_TX_OK; | |
1351 | ||
1352 | out_free: | |
1353 | /* | |
1354 | * An error of some sort happened. Free the TX skb and tell the | |
1355 | * OS that we've "dealt" with the packet ... | |
1356 | */ | |
1357 | dev_kfree_skb(skb); | |
1358 | return NETDEV_TX_OK; | |
1359 | } | |
1360 | ||
1361 | /** |
1362 | * copy_frags - copy fragments from gather list into skb_shared_info | |
1363 | * @skb: destination skb | |
1364 | * @gl: source internal packet gather list | |
1365 | * @offset: packet start offset in first page | |
1366 | * | |
1367 | * Copy an internal packet gather list into a Linux skb_shared_info | |
1368 | * structure. | |
1369 | */ | |
1370 | static inline void copy_frags(struct sk_buff *skb, | |
1371 | const struct pkt_gl *gl, | |
1372 | unsigned int offset) | |
1373 | { | |
1374 | int i; | |
1375 | ||
1376 | /* usually there's just one frag */ | |
1377 | __skb_fill_page_desc(skb, 0, gl->frags[0].page, | |
1378 | gl->frags[0].offset + offset, | |
1379 | gl->frags[0].size - offset); | |
1380 | skb_shinfo(skb)->nr_frags = gl->nfrags; | |
1381 | for (i = 1; i < gl->nfrags; i++) | |
1382 | __skb_fill_page_desc(skb, i, gl->frags[i].page, | |
1383 | gl->frags[i].offset, | |
1384 | gl->frags[i].size); | |
1385 | ||
1386 | /* get a reference to the last page, we don't own it */ | |
1387 | get_page(gl->frags[gl->nfrags - 1].page); | |
1388 | } | |
1389 | ||
1390 | /** |
1391 | * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list | |
1392 | * @gl: the gather list | |
1393 | * @skb_len: size of sk_buff main body if it carries fragments | |
1394 | * @pull_len: amount of data to move to the sk_buff's main body | |
1395 | * | |
1396 | * Builds an sk_buff from the given packet gather list. Returns the | |
1397 | * sk_buff or %NULL if sk_buff allocation failed. | |
1398 | */ | |
1399 | static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl, |
1400 | unsigned int skb_len, | |
1401 | unsigned int pull_len) | |
1402 | { |
1403 | struct sk_buff *skb; | |
1404 | |
1405 | /* | |
1406 | * If the ingress packet is small enough, allocate an skb large enough | |
1407 | * for all of the data and copy it inline. Otherwise, allocate an skb | |
1408 | * with enough room to pull in the header and reference the rest of | |
1409 | * the data via the skb fragment list. | |
1410 | * | |
1411 | * Below we rely on RX_COPY_THRES being less than the smallest Rx | |
1412 | * buffer size, which is expected since buffers are at least |
1413 | * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one | |
1414 | * fragment. | |
1415 | */ | |
1416 | if (gl->tot_len <= RX_COPY_THRES) { | |
1417 | /* small packets have only one fragment */ | |
1418 | skb = alloc_skb(gl->tot_len, GFP_ATOMIC); | |
1419 | if (unlikely(!skb)) | |
1420 | goto out; | |
1421 | __skb_put(skb, gl->tot_len); | |
1422 | skb_copy_to_linear_data(skb, gl->va, gl->tot_len); | |
1423 | } else { | |
1424 | skb = alloc_skb(skb_len, GFP_ATOMIC); | |
1425 | if (unlikely(!skb)) | |
1426 | goto out; | |
1427 | __skb_put(skb, pull_len); | |
1428 | skb_copy_to_linear_data(skb, gl->va, pull_len); | |
1429 | ||
1430 | copy_frags(skb, gl, pull_len); |
1431 | skb->len = gl->tot_len; |
1432 | skb->data_len = skb->len - pull_len; | |
1433 | skb->truesize += skb->data_len; | |
1434 | } |
1435 | ||
1436 | out: | |
1437 | return skb; | |
1438 | } | |
1439 | ||
1440 | /** |
1441 | * t4vf_pktgl_free - free a packet gather list | |
1442 | * @gl: the gather list | |
1443 | * | |
1444 | * Releases the pages of a packet gather list. We do not own the last | |
1445 | * page on the list and do not free it. | |
1446 | */ | |
8a67d1c6 | 1447 | static void t4vf_pktgl_free(const struct pkt_gl *gl) |
c6e0d914 CL |
1448 | { |
1449 | int frag; | |
1450 | ||
1451 | frag = gl->nfrags - 1; | |
1452 | while (frag--) | |
1453 | put_page(gl->frags[frag].page); | |
1454 | } | |
1455 | ||
c6e0d914 CL |
1456 | /** |
1457 | * do_gro - perform Generic Receive Offload ingress packet processing | |
1458 | * @rxq: ingress RX Ethernet Queue | |
1459 | * @gl: gather list for ingress packet | |
1460 | * @pkt: CPL header for last packet fragment | |
1461 | * | |
1462 | * Perform Generic Receive Offload (GRO) ingress packet processing. | |
1463 | * We use the standard Linux GRO interfaces for this. | |
1464 | */ | |
1465 | static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, | |
1466 | const struct cpl_rx_pkt *pkt) | |
1467 | { | |
1468 | int ret; | |
1469 | struct sk_buff *skb; | |
1470 | ||
1471 | skb = napi_get_frags(&rxq->rspq.napi); | |
1472 | if (unlikely(!skb)) { | |
1473 | t4vf_pktgl_free(gl); | |
1474 | rxq->stats.rx_drops++; | |
1475 | return; | |
1476 | } | |
1477 | ||
a0006a86 | 1478 | copy_frags(skb, gl, PKTSHIFT); |
c6e0d914 CL |
1479 | skb->len = gl->tot_len - PKTSHIFT; |
1480 | skb->data_len = skb->len; | |
1481 | skb->truesize += skb->data_len; | |
1482 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1483 | skb_record_rx_queue(skb, rxq->rspq.idx); | |
1484 | ||
af32de0e | 1485 | if (pkt->vlan_ex) { |
86a9bad3 PM |
1486 | __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q), |
1487 | be16_to_cpu(pkt->vlan)); | |
af32de0e VP |
1488 | rxq->stats.vlan_ex++; |
1489 | } | |
c6e0d914 CL |
1490 | ret = napi_gro_frags(&rxq->rspq.napi); |
1491 | ||
c6e0d914 CL |
1492 | if (ret == GRO_HELD) |
1493 | rxq->stats.lro_pkts++; | |
1494 | else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) | |
1495 | rxq->stats.lro_merged++; | |
1496 | rxq->stats.pkts++; | |
1497 | rxq->stats.rx_cso++; | |
1498 | } | |
1499 | ||
1500 | /** | |
1501 | * t4vf_ethrx_handler - process an ingress ethernet packet | |
1502 | * @rspq: the response queue that received the packet | |
1503 | * @rsp: the response queue descriptor holding the RX_PKT message | |
1504 | * @gl: the gather list of packet fragments | |
1505 | * | |
1506 | * Process an ingress ethernet packet and deliver it to the stack. | |
1507 | */ | |
1508 | int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, | |
1509 | const struct pkt_gl *gl) | |
1510 | { | |
1511 | struct sk_buff *skb; | |
8b9a4d56 | 1512 | const struct cpl_rx_pkt *pkt = (void *)rsp; |
c6e0d914 | 1513 | bool csum_ok = pkt->csum_calc && !pkt->err_vec; |
c6e0d914 CL |
1514 | struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); |
1515 | ||
1516 | /* | |
1517 | * If this is a good TCP packet and we have Generic Receive Offload | |
1518 | * enabled, handle the packet in the GRO path. | |
1519 | */ | |
1520 | if ((pkt->l2info & cpu_to_be32(RXF_TCP)) && | |
1521 | (rspq->netdev->features & NETIF_F_GRO) && csum_ok && | |
1522 | !pkt->ip_frag) { | |
1523 | do_gro(rxq, gl, pkt); | |
1524 | return 0; | |
1525 | } | |
1526 | ||
1527 | /* | |
eb6c503d | 1528 | * Convert the Packet Gather List into an skb. |
c6e0d914 | 1529 | */ |
eb6c503d CL |
1530 | skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN); |
1531 | if (unlikely(!skb)) { | |
1532 | t4vf_pktgl_free(gl); | |
1533 | rxq->stats.rx_drops++; | |
1534 | return 0; | |
c6e0d914 | 1535 | } |
c6e0d914 CL |
1536 | __skb_pull(skb, PKTSHIFT); |
1537 | skb->protocol = eth_type_trans(skb, rspq->netdev); | |
1538 | skb_record_rx_queue(skb, rspq->idx); | |
c6e0d914 CL |
1539 | rxq->stats.pkts++; |
1540 | ||
2ed28baa MM |
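/*
 * For a non-fragmented TCP/UDP packet the hardware has already verified
 * the checksum, so we report CHECKSUM_UNNECESSARY.  For an IP fragment
 * the hardware can't verify the transport checksum but does hand us the
 * whole-packet checksum, which we pass up as CHECKSUM_COMPLETE and let
 * the stack finish the job.
 */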
1541 | if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) && |
1542 | !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) { | |
c6e0d914 CL |
1543 | if (!pkt->ip_frag) |
1544 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1545 | else { | |
1546 | __sum16 c = (__force __sum16)pkt->csum; | |
1547 | skb->csum = csum_unfold(c); | |
1548 | skb->ip_summed = CHECKSUM_COMPLETE; | |
1549 | } | |
1550 | rxq->stats.rx_cso++; | |
1551 | } else | |
bc8acf2c | 1552 | skb_checksum_none_assert(skb); |
c6e0d914 | 1553 | |
87737663 | 1554 | if (pkt->vlan_ex) { |
c6e0d914 | 1555 | rxq->stats.vlan_ex++; |
86a9bad3 | 1556 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan)); |
87737663 JP |
1557 | } |
1558 | ||
1559 | netif_receive_skb(skb); | |
c6e0d914 CL |
1560 | |
1561 | return 0; | |
c6e0d914 CL |
1562 | } |
1563 | ||
1564 | /** | |
1565 | * is_new_response - check if a response is newly written | |
1566 | * @rc: the response control descriptor | |
1567 | * @rspq: the response queue | |
1568 | * | |
1569 | * Returns true if a response descriptor contains a yet unprocessed | |
1570 | * response. | |
1571 | */ | |
1572 | static inline bool is_new_response(const struct rsp_ctrl *rc, | |
1573 | const struct sge_rspq *rspq) | |
1574 | { | |
1575 | return RSPD_GEN(rc->type_gen) == rspq->gen; | |
1576 | } | |
1577 | ||
1578 | /** | |
1579 | * restore_rx_bufs - put back a packet's RX buffers | |
1580 | * @gl: the packet gather list | |
1581 | * @fl: the SGE Free List | |
1582 | * @frags: how many fragments in @gl |
1583 | * | |
1584 | * Called when we find out that the current packet, @gl, can't be |
1585 | * processed right away for some reason. This is a very rare event and | |
1586 | * there's no effort to make this suspension/resumption process | |
1587 | * particularly efficient. | |
1588 | * | |
1589 | * We implement the suspension by putting all of the RX buffers associated | |
1590 | * with the current packet back on the original Free List. The buffers | |
1591 | * have already been unmapped and are left unmapped, we mark them as | |
1592 | * unmapped in order to prevent further unmapping attempts. (Effectively | |
1593 | * this function undoes the series of @unmap_rx_buf calls which were done | |
1594 | * to create the current packet's gather list.) This leaves us ready to | |
1595 | * restart processing of the packet the next time we start processing the | |
1596 | * RX Queue ... | |
1597 | */ | |
1598 | static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl, | |
1599 | int frags) | |
1600 | { | |
1601 | struct rx_sw_desc *sdesc; | |
1602 | ||
1603 | while (frags--) { | |
1604 | if (fl->cidx == 0) | |
1605 | fl->cidx = fl->size - 1; | |
1606 | else | |
1607 | fl->cidx--; | |
1608 | sdesc = &fl->sdesc[fl->cidx]; | |
1609 | sdesc->page = gl->frags[frags].page; | |
1610 | sdesc->dma_addr |= RX_UNMAPPED_BUF; | |
1611 | fl->avail++; | |
1612 | } | |
1613 | } | |
1614 | ||
1615 | /** | |
1616 | * rspq_next - advance to the next entry in a response queue | |
1617 | * @rspq: the queue | |
1618 | * | |
1619 | * Updates the state of a response queue to advance it to the next entry. | |
1620 | */ | |
1621 | static inline void rspq_next(struct sge_rspq *rspq) | |
1622 | { | |
1623 | rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len; | |
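/*
 * When the consumer index wraps back to the start of the ring, flip the
 * generation bit so that entries written on the previous pass around the
 * ring no longer pass the is_new_response() test.
 */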
1624 | if (unlikely(++rspq->cidx == rspq->size)) { | |
1625 | rspq->cidx = 0; | |
1626 | rspq->gen ^= 1; | |
1627 | rspq->cur_desc = rspq->desc; | |
1628 | } | |
1629 | } | |
1630 | ||
1631 | /** | |
1632 | * process_responses - process responses from an SGE response queue | |
1633 | * @rspq: the ingress response queue to process | |
1634 | * @budget: how many responses can be processed in this round | |
1635 | * | |
1636 | * Process responses from a Scatter Gather Engine response queue up to | |
1637 | * the supplied budget. Responses include received packets as well as | |
1638 | * control messages from firmware or hardware. | |
1639 | * | |
1640 | * Additionally choose the interrupt holdoff time for the next interrupt | |
1641 | * on this queue. If the system is under memory shortage, use a fairly |
1642 | * long delay to help recovery. | |
1643 | */ | |
8a67d1c6 | 1644 | static int process_responses(struct sge_rspq *rspq, int budget) |
c6e0d914 CL |
1645 | { |
1646 | struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); | |
1647 | int budget_left = budget; | |
1648 | ||
1649 | while (likely(budget_left)) { | |
1650 | int ret, rsp_type; | |
1651 | const struct rsp_ctrl *rc; | |
1652 | ||
1653 | rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc)); | |
1654 | if (!is_new_response(rc, rspq)) | |
1655 | break; | |
1656 | ||
1657 | /* | |
1658 | * Figure out what kind of response we've received from the | |
1659 | * SGE. | |
1660 | */ | |
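/*
 * The read memory barrier below ensures we don't read the rest of the
 * response descriptor until after we've seen the generation bit that
 * says the hardware has finished writing it.
 */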
1661 | rmb(); | |
1662 | rsp_type = RSPD_TYPE(rc->type_gen); | |
1663 | if (likely(rsp_type == RSP_TYPE_FLBUF)) { | |
a0006a86 | 1664 | struct page_frag *fp; |
c6e0d914 CL |
1665 | struct pkt_gl gl; |
1666 | const struct rx_sw_desc *sdesc; | |
1667 | u32 bufsz, frag; | |
1668 | u32 len = be32_to_cpu(rc->pldbuflen_qid); | |
1669 | ||
1670 | /* | |
1671 | * If we get a "new buffer" message from the SGE we | |
1672 | * need to move on to the next Free List buffer. | |
1673 | */ | |
1674 | if (len & RSPD_NEWBUF) { | |
1675 | /* | |
1676 | * We get one "new buffer" message when we | |
1677 | * first start up a queue so we need to ignore | |
1678 | * it when our offset into the buffer is 0. | |
1679 | */ | |
1680 | if (likely(rspq->offset > 0)) { | |
1681 | free_rx_bufs(rspq->adapter, &rxq->fl, | |
1682 | 1); | |
1683 | rspq->offset = 0; | |
1684 | } | |
1685 | len = RSPD_LEN(len); | |
1686 | } | |
b94e72e2 | 1687 | gl.tot_len = len; |
c6e0d914 CL |
1688 | |
1689 | /* | |
1690 | * Gather packet fragments. | |
1691 | */ | |
1692 | for (frag = 0, fp = gl.frags; /**/; frag++, fp++) { | |
1693 | BUG_ON(frag >= MAX_SKB_FRAGS); | |
1694 | BUG_ON(rxq->fl.avail == 0); | |
1695 | sdesc = &rxq->fl.sdesc[rxq->fl.cidx]; | |
1696 | bufsz = get_buf_size(sdesc); | |
1697 | fp->page = sdesc->page; | |
a0006a86 IC |
1698 | fp->offset = rspq->offset; |
1699 | fp->size = min(bufsz, len); | |
1700 | len -= fp->size; | |
c6e0d914 CL |
1701 | if (!len) |
1702 | break; | |
1703 | unmap_rx_buf(rspq->adapter, &rxq->fl); | |
1704 | } | |
1705 | gl.nfrags = frag+1; | |
1706 | ||
1707 | /* | |
1708 | * Last buffer remains mapped so explicitly make it | |
1709 | * coherent for CPU access and start preloading first | |
1710 | * cache line ... | |
1711 | */ | |
1712 | dma_sync_single_for_cpu(rspq->adapter->pdev_dev, | |
1713 | get_buf_addr(sdesc), | |
a0006a86 | 1714 | fp->size, DMA_FROM_DEVICE); |
c6e0d914 | 1715 | gl.va = (page_address(gl.frags[0].page) + |
a0006a86 | 1716 | gl.frags[0].offset); |
c6e0d914 CL |
1717 | prefetch(gl.va); |
1718 | ||
1719 | /* | |
1720 | * Hand the new ingress packet to the handler for | |
1721 | * this Response Queue. | |
1722 | */ | |
1723 | ret = rspq->handler(rspq, rspq->cur_desc, &gl); | |
1724 | if (likely(ret == 0)) | |
a0006a86 | 1725 | rspq->offset += ALIGN(fp->size, FL_ALIGN); |
c6e0d914 CL |
1726 | else |
1727 | restore_rx_bufs(&gl, &rxq->fl, frag); | |
1728 | } else if (likely(rsp_type == RSP_TYPE_CPL)) { | |
1729 | ret = rspq->handler(rspq, rspq->cur_desc, NULL); | |
1730 | } else { | |
1731 | WARN_ON(rsp_type > RSP_TYPE_CPL); | |
1732 | ret = 0; | |
1733 | } | |
1734 | ||
1735 | if (unlikely(ret)) { | |
1736 | /* | |
1737 | * Couldn't process descriptor, back off for recovery. | |
1738 | * We use the SGE's last timer which has the longest | |
1739 | * interrupt coalescing value ... | |
1740 | */ | |
1741 | const int NOMEM_TIMER_IDX = SGE_NTIMERS-1; | |
1742 | rspq->next_intr_params = | |
1743 | QINTR_TIMER_IDX(NOMEM_TIMER_IDX); | |
1744 | break; | |
1745 | } | |
1746 | ||
1747 | rspq_next(rspq); | |
1748 | budget_left--; | |
1749 | } | |
1750 | ||
1751 | /* | |
1752 | * If this is a Response Queue with an associated Free List and | |
1753 | * at least two Egress Queue units available in the Free List | |
1754 | * for new buffer pointers, refill the Free List. | |
1755 | */ | |
1756 | if (rspq->offset >= 0 && | |
1757 | rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT) | |
1758 | __refill_fl(rspq->adapter, &rxq->fl); | |
1759 | return budget - budget_left; | |
1760 | } | |
1761 | ||
1762 | /** | |
1763 | * napi_rx_handler - the NAPI handler for RX processing | |
1764 | * @napi: the napi instance | |
1765 | * @budget: how many packets we can process in this round | |
1766 | * | |
1767 | * Handler for new data events when using NAPI. This does not need any | |
1768 | * locking or protection from interrupts as data interrupts are off at | |
1769 | * this point and other adapter interrupts do not interfere (the latter | |
1770 | * is not a concern at all with MSI-X as non-data interrupts then have |
1771 | * a separate handler). | |
1772 | */ | |
1773 | static int napi_rx_handler(struct napi_struct *napi, int budget) | |
1774 | { | |
1775 | unsigned int intr_params; | |
1776 | struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi); | |
1777 | int work_done = process_responses(rspq, budget); | |
1778 | ||
1779 | if (likely(work_done < budget)) { | |
1780 | napi_complete(napi); | |
1781 | intr_params = rspq->next_intr_params; | |
1782 | rspq->next_intr_params = rspq->intr_params; | |
1783 | } else | |
1784 | intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX); | |
1785 | ||
68dc9d36 CL |
1786 | if (unlikely(work_done == 0)) |
1787 | rspq->unhandled_irqs++; | |
1788 | ||
c6e0d914 CL |
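/*
 * The write to the SGE_VF_GTS register returns credits for the response
 * entries we've just processed (CIDXINC), identifies the ingress queue
 * being updated (INGRESSQID) and re-arms its interrupt with the holdoff
 * parameters chosen above (SEINTARM).
 */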
1789 | t4_write_reg(rspq->adapter, |
1790 | T4VF_SGE_BASE_ADDR + SGE_VF_GTS, | |
1791 | CIDXINC(work_done) | | |
1792 | INGRESSQID((u32)rspq->cntxt_id) | | |
1793 | SEINTARM(intr_params)); | |
1794 | return work_done; | |
1795 | } | |
1796 | ||
1797 | /* | |
1798 | * The MSI-X interrupt handler for an SGE response queue for the NAPI case | |
1799 | * (i.e., response queue serviced by NAPI polling). | |
1800 | */ | |
1801 | irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie) | |
1802 | { | |
1803 | struct sge_rspq *rspq = cookie; | |
1804 | ||
1805 | napi_schedule(&rspq->napi); | |
1806 | return IRQ_HANDLED; | |
1807 | } | |
1808 | ||
1809 | /* | |
1810 | * Process the indirect interrupt entries in the interrupt queue and kick off | |
1811 | * NAPI for each queue that has generated an entry. | |
1812 | */ | |
1813 | static unsigned int process_intrq(struct adapter *adapter) | |
1814 | { | |
1815 | struct sge *s = &adapter->sge; | |
1816 | struct sge_rspq *intrq = &s->intrq; | |
1817 | unsigned int work_done; | |
1818 | ||
1819 | spin_lock(&adapter->sge.intrq_lock); | |
1820 | for (work_done = 0; ; work_done++) { | |
1821 | const struct rsp_ctrl *rc; | |
1822 | unsigned int qid, iq_idx; | |
1823 | struct sge_rspq *rspq; | |
1824 | ||
1825 | /* | |
1826 | * Grab the next response from the interrupt queue and bail | |
1827 | * out if it's not a new response. | |
1828 | */ | |
1829 | rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc)); | |
1830 | if (!is_new_response(rc, intrq)) | |
1831 | break; | |
1832 | ||
1833 | /* | |
1834 | * If the response isn't a forwarded interrupt message, issue an |
1835 | * error and go on to the next response message. This should | |
1836 | * never happen ... | |
1837 | */ | |
1838 | rmb(); | |
1839 | if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) { | |
1840 | dev_err(adapter->pdev_dev, | |
1841 | "Unexpected INTRQ response type %d\n", | |
1842 | RSPD_TYPE(rc->type_gen)); | |
1843 | continue; | |
1844 | } | |
1845 | ||
1846 | /* | |
1847 | * Extract the Queue ID from the interrupt message and perform | |
1848 | * sanity checking to make sure it really refers to one of our | |
1849 | * Ingress Queues which is active and matches the queue's ID. | |
1850 | * None of these error conditions should ever happen so we may | |
1851 | * want to make them fatal and/or conditional under |
1852 | * DEBUG. | |
1853 | */ | |
1854 | qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid)); | |
1855 | iq_idx = IQ_IDX(s, qid); | |
1856 | if (unlikely(iq_idx >= MAX_INGQ)) { | |
1857 | dev_err(adapter->pdev_dev, | |
1858 | "Ingress QID %d out of range\n", qid); | |
1859 | continue; | |
1860 | } | |
1861 | rspq = s->ingr_map[iq_idx]; | |
1862 | if (unlikely(rspq == NULL)) { | |
1863 | dev_err(adapter->pdev_dev, | |
1864 | "Ingress QID %d RSPQ=NULL\n", qid); | |
1865 | continue; | |
1866 | } | |
1867 | if (unlikely(rspq->abs_id != qid)) { | |
1868 | dev_err(adapter->pdev_dev, | |
1869 | "Ingress QID %d refers to RSPQ %d\n", | |
1870 | qid, rspq->abs_id); | |
1871 | continue; | |
1872 | } | |
1873 | ||
1874 | /* | |
1875 | * Schedule NAPI processing on the indicated Response Queue | |
1876 | * and move on to the next entry in the Forwarded Interrupt | |
1877 | * Queue. | |
1878 | */ | |
1879 | napi_schedule(&rspq->napi); | |
1880 | rspq_next(intrq); | |
1881 | } | |
1882 | ||
1883 | t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, | |
1884 | CIDXINC(work_done) | | |
1885 | INGRESSQID(intrq->cntxt_id) | | |
1886 | SEINTARM(intrq->intr_params)); | |
1887 | ||
1888 | spin_unlock(&adapter->sge.intrq_lock); | |
1889 | ||
1890 | return work_done; | |
1891 | } | |
1892 | ||
1893 | /* | |
1894 | * The MSI interrupt handler handles data events from SGE response queues as | |
1895 | * well as error and other async events as they all use the same MSI vector. | |
1896 | */ | |
8a67d1c6 | 1897 | static irqreturn_t t4vf_intr_msi(int irq, void *cookie) |
c6e0d914 CL |
1898 | { |
1899 | struct adapter *adapter = cookie; | |
1900 | ||
1901 | process_intrq(adapter); | |
1902 | return IRQ_HANDLED; | |
1903 | } | |
1904 | ||
1905 | /** | |
1906 | * t4vf_intr_handler - select the top-level interrupt handler | |
1907 | * @adapter: the adapter | |
1908 | * | |
1909 | * Selects the top-level interrupt handler based on the type of interrupts | |
1910 | * (MSI-X or MSI). | |
1911 | */ | |
1912 | irq_handler_t t4vf_intr_handler(struct adapter *adapter) | |
1913 | { | |
1914 | BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0); | |
1915 | if (adapter->flags & USING_MSIX) | |
1916 | return t4vf_sge_intr_msix; | |
1917 | else | |
1918 | return t4vf_intr_msi; | |
1919 | } | |
1920 | ||
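/*
 * Illustrative sketch only (not part of this file): with MSI, the
 * driver's setup path would typically register the selected handler on
 * the single PCI vector with something like
 *
 *	err = request_irq(adapter->pdev->irq, t4vf_intr_handler(adapter),
 *			  0, adapter->name, adapter);
 *
 * The field names used here (pdev, name) are assumptions for the
 * example.
 */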
1921 | /** | |
1922 | * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues | |
1923 | * @data: the adapter | |
1924 | * | |
1925 | * Runs periodically from a timer to perform maintenance of SGE RX queues. | |
1926 | * | |
1927 | * Replenishes RX queues that have run out due to memory shortage. |
1928 | * Normally new RX buffers are added when existing ones are consumed but | |
1929 | * when out of memory a queue can become empty. We schedule NAPI to do | |
1930 | * the actual refill. | |
1931 | */ | |
1932 | static void sge_rx_timer_cb(unsigned long data) | |
1933 | { | |
1934 | struct adapter *adapter = (struct adapter *)data; | |
1935 | struct sge *s = &adapter->sge; | |
1936 | unsigned int i; | |
1937 | ||
1938 | /* | |
1939 | * Scan the "Starving Free Lists" flag array looking for any Free | |
1940 | * Lists in need of more free buffers. If we find one and it's not | |
1941 | * being actively polled, then bump its "starving" counter and attempt | |
1942 | * to refill it. If we're successful in adding enough buffers to push | |
1943 | * the Free List over the starving threshold, then we can clear its | |
1944 | * "starving" status. | |
1945 | */ | |
1946 | for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) { | |
1947 | unsigned long m; | |
1948 | ||
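/*
 * "m &= m - 1" clears the lowest set bit of m, so this inner loop visits
 * each starving Free List flagged in this word exactly once.
 */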
1949 | for (m = s->starving_fl[i]; m; m &= m - 1) { | |
1950 | unsigned int id = __ffs(m) + i * BITS_PER_LONG; | |
1951 | struct sge_fl *fl = s->egr_map[id]; | |
1952 | ||
1953 | clear_bit(id, s->starving_fl); | |
1954 | smp_mb__after_clear_bit(); | |
1955 | ||
1956 | /* | |
1957 | * Since we are accessing fl without a lock there's a | |
1958 | * small probability of a false positive where we | |
1959 | * schedule napi but the FL is no longer starving. | |
1960 | * No biggie. | |
1961 | */ | |
1962 | if (fl_starving(fl)) { | |
1963 | struct sge_eth_rxq *rxq; | |
1964 | ||
1965 | rxq = container_of(fl, struct sge_eth_rxq, fl); | |
1966 | if (napi_reschedule(&rxq->rspq.napi)) | |
1967 | fl->starving++; | |
1968 | else | |
1969 | set_bit(id, s->starving_fl); | |
1970 | } | |
1971 | } | |
1972 | } | |
1973 | ||
1974 | /* | |
1975 | * Reschedule the next scan for starving Free Lists ... | |
1976 | */ | |
1977 | mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); | |
1978 | } | |
1979 | ||
1980 | /** | |
1981 | * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues | |
1982 | * @data: the adapter | |
1983 | * | |
1984 | * Runs periodically from a timer to perform maintenance of SGE TX queues. | |
1985 | * | |
1986 | * Reclaims completed Tx packets for the Ethernet queues. Normally |
1987 | * packets are cleaned up by new Tx packets, this timer cleans up packets | |
1988 | * when no new packets are being submitted. This is essential for pktgen, | |
1989 | * at least. | |
1990 | */ | |
1991 | static void sge_tx_timer_cb(unsigned long data) | |
1992 | { | |
1993 | struct adapter *adapter = (struct adapter *)data; | |
1994 | struct sge *s = &adapter->sge; | |
1995 | unsigned int i, budget; | |
1996 | ||
1997 | budget = MAX_TIMER_TX_RECLAIM; | |
1998 | i = s->ethtxq_rover; | |
1999 | do { | |
2000 | struct sge_eth_txq *txq = &s->ethtxq[i]; | |
2001 | ||
2002 | if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) { | |
2003 | int avail = reclaimable(&txq->q); | |
2004 | ||
2005 | if (avail > budget) | |
2006 | avail = budget; | |
2007 | ||
2008 | free_tx_desc(adapter, &txq->q, avail, true); | |
2009 | txq->q.in_use -= avail; | |
2010 | __netif_tx_unlock(txq->txq); | |
2011 | ||
2012 | budget -= avail; | |
2013 | if (!budget) | |
2014 | break; | |
2015 | } | |
2016 | ||
2017 | i++; | |
2018 | if (i >= s->ethqsets) | |
2019 | i = 0; | |
2020 | } while (i != s->ethtxq_rover); | |
2021 | s->ethtxq_rover = i; | |
2022 | ||
2023 | /* | |
2024 | * If we found too many reclaimable packets schedule a timer in the | |
2025 | * near future to continue where we left off. Otherwise the next timer | |
2026 | * will be at its normal interval. | |
2027 | */ | |
2028 | mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); | |
2029 | } | |
2030 | ||
2031 | /** | |
2032 | * t4vf_sge_alloc_rxq - allocate an SGE RX Queue | |
2033 | * @adapter: the adapter | |
2034 | * @rspq: pointer to the new rxq's Response Queue to be filled in |
2035 | * @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue | |
2036 | * @dev: the network device associated with the new rspq | |
2037 | * @intr_dest: MSI-X vector index (overridden in MSI mode) |
2038 | * @fl: pointer to the new rxq's Free List to be filled in | |
2039 | * @hnd: the interrupt handler to invoke for the rspq | |
2040 | */ | |
2041 | int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, | |
2042 | bool iqasynch, struct net_device *dev, | |
2043 | int intr_dest, | |
2044 | struct sge_fl *fl, rspq_handler_t hnd) | |
2045 | { | |
2046 | struct port_info *pi = netdev_priv(dev); | |
2047 | struct fw_iq_cmd cmd, rpl; | |
2048 | int ret, iqandst, flsz = 0; | |
2049 | ||
2050 | /* | |
2051 | * If we're using MSI interrupts and we're not initializing the | |
2052 | * Forwarded Interrupt Queue itself, then set up this queue for | |
2053 | * indirect interrupts to the Forwarded Interrupt Queue. Obviously | |
2054 | * the Forwarded Interrupt Queue must be set up before any other | |
2055 | * ingress queue ... | |
2056 | */ | |
2057 | if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) { | |
2058 | iqandst = SGE_INTRDST_IQ; | |
2059 | intr_dest = adapter->sge.intrq.abs_id; | |
2060 | } else | |
2061 | iqandst = SGE_INTRDST_PCI; | |
2062 | ||
2063 | /* | |
2064 | * Allocate the hardware ring for the Response Queue. The size needs | |
2065 | * to be a multiple of 16 which includes the mandatory status entry | |
2066 | * (regardless of whether the Status Page capabilities are enabled or | |
2067 | * not). | |
2068 | */ | |
2069 | rspq->size = roundup(rspq->size, 16); | |
2070 | rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len, | |
2071 | 0, &rspq->phys_addr, NULL, 0); | |
2072 | if (!rspq->desc) | |
2073 | return -ENOMEM; | |
2074 | ||
2075 | /* | |
2076 | * Fill in the Ingress Queue Command. Note: Ideally this code would | |
2077 | * be in t4vf_hw.c but there are so many parameters and dependencies | |
2078 | * on our Linux SGE state that we would end up having to pass tons of | |
2079 | * parameters. We'll have to think about how this might be migrated | |
2080 | * into OS-independent common code ... | |
2081 | */ | |
2082 | memset(&cmd, 0, sizeof(cmd)); | |
2083 | cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) | | |
2084 | FW_CMD_REQUEST | | |
2085 | FW_CMD_WRITE | | |
2086 | FW_CMD_EXEC); | |
2087 | cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC | | |
2088 | FW_IQ_CMD_IQSTART(1) | | |
2089 | FW_LEN16(cmd)); | |
2090 | cmd.type_to_iqandstindex = | |
2091 | cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | | |
2092 | FW_IQ_CMD_IQASYNCH(iqasynch) | | |
2093 | FW_IQ_CMD_VIID(pi->viid) | | |
2094 | FW_IQ_CMD_IQANDST(iqandst) | | |
2095 | FW_IQ_CMD_IQANUS(1) | | |
2096 | FW_IQ_CMD_IQANUD(SGE_UPDATEDEL_INTR) | | |
2097 | FW_IQ_CMD_IQANDSTINDEX(intr_dest)); | |
2098 | cmd.iqdroprss_to_iqesize = | |
2099 | cpu_to_be16(FW_IQ_CMD_IQPCIECH(pi->port_id) | | |
2100 | FW_IQ_CMD_IQGTSMODE | | |
2101 | FW_IQ_CMD_IQINTCNTTHRESH(rspq->pktcnt_idx) | | |
2102 | FW_IQ_CMD_IQESIZE(ilog2(rspq->iqe_len) - 4)); | |
2103 | cmd.iqsize = cpu_to_be16(rspq->size); | |
2104 | cmd.iqaddr = cpu_to_be64(rspq->phys_addr); | |
2105 | ||
2106 | if (fl) { | |
2107 | /* | |
2108 | * Allocate the ring for the hardware free list (with space | |
2109 | * for its status page) along with the associated software | |
2110 | * descriptor ring. The free list size needs to be a multiple | |
2111 | * of the Egress Queue Unit. | |
2112 | */ | |
2113 | fl->size = roundup(fl->size, FL_PER_EQ_UNIT); | |
2114 | fl->desc = alloc_ring(adapter->pdev_dev, fl->size, | |
2115 | sizeof(__be64), sizeof(struct rx_sw_desc), | |
2116 | &fl->addr, &fl->sdesc, STAT_LEN); | |
2117 | if (!fl->desc) { | |
2118 | ret = -ENOMEM; | |
2119 | goto err; | |
2120 | } | |
2121 | ||
2122 | /* | |
2123 | * Calculate the size of the hardware free list ring plus | |
caedda35 | 2124 | * Status Page (which the SGE will place after the end of the |
c6e0d914 CL |
2125 | * free list ring) in Egress Queue Units. |
2126 | */ | |
2127 | flsz = (fl->size / FL_PER_EQ_UNIT + | |
2128 | STAT_LEN / EQ_UNIT); | |
2129 | ||
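/*
 * Worked example (illustrative, assuming the usual 64-byte Egress Queue
 * Unit, i.e. eight 8-byte buffer pointers per unit): a 4096-entry free
 * list occupies 512 units, plus one or two further units for the 64- or
 * 128-byte Status Page.
 */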
2130 | /* | |
2131 | * Fill in all the relevant firmware Ingress Queue Command | |
2132 | * fields for the free list. | |
2133 | */ | |
2134 | cmd.iqns_to_fl0congen = | |
2135 | cpu_to_be32( | |
2136 | FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) | | |
ce91a923 NKI |
2137 | FW_IQ_CMD_FL0PACKEN(1) | |
2138 | FW_IQ_CMD_FL0PADEN(1)); | |
c6e0d914 CL |
2139 | cmd.fl0dcaen_to_fl0cidxfthresh = |
2140 | cpu_to_be16( | |
2141 | FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) | | |
2142 | FW_IQ_CMD_FL0FBMAX(SGE_FETCHBURSTMAX_512B)); | |
2143 | cmd.fl0size = cpu_to_be16(flsz); | |
2144 | cmd.fl0addr = cpu_to_be64(fl->addr); | |
2145 | } | |
2146 | ||
2147 | /* | |
2148 | * Issue the firmware Ingress Queue Command and extract the results if | |
2149 | * it completes successfully. | |
2150 | */ | |
2151 | ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); | |
2152 | if (ret) | |
2153 | goto err; | |
2154 | ||
2155 | netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64); | |
2156 | rspq->cur_desc = rspq->desc; | |
2157 | rspq->cidx = 0; | |
2158 | rspq->gen = 1; | |
2159 | rspq->next_intr_params = rspq->intr_params; | |
2160 | rspq->cntxt_id = be16_to_cpu(rpl.iqid); | |
2161 | rspq->abs_id = be16_to_cpu(rpl.physiqid); | |
2162 | rspq->size--; /* subtract status entry */ | |
2163 | rspq->adapter = adapter; | |
2164 | rspq->netdev = dev; | |
2165 | rspq->handler = hnd; | |
2166 | ||
2167 | /* set offset to -1 to distinguish ingress queues without FL */ | |
2168 | rspq->offset = fl ? 0 : -1; | |
2169 | ||
2170 | if (fl) { | |
2171 | fl->cntxt_id = be16_to_cpu(rpl.fl0id); | |
2172 | fl->avail = 0; | |
2173 | fl->pend_cred = 0; | |
2174 | fl->pidx = 0; | |
2175 | fl->cidx = 0; | |
2176 | fl->alloc_failed = 0; | |
2177 | fl->large_alloc_failed = 0; | |
2178 | fl->starving = 0; | |
2179 | refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL); | |
2180 | } | |
2181 | ||
2182 | return 0; | |
2183 | ||
2184 | err: | |
2185 | /* | |
2186 | * An error occurred. Clean up our partial allocation state and | |
2187 | * return the error. | |
2188 | */ | |
2189 | if (rspq->desc) { | |
2190 | dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len, | |
2191 | rspq->desc, rspq->phys_addr); | |
2192 | rspq->desc = NULL; | |
2193 | } | |
2194 | if (fl && fl->desc) { | |
2195 | kfree(fl->sdesc); | |
2196 | fl->sdesc = NULL; | |
2197 | dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT, | |
2198 | fl->desc, fl->addr); | |
2199 | fl->desc = NULL; | |
2200 | } | |
2201 | return ret; | |
2202 | } | |
2203 | ||
2204 | /** | |
2205 | * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue | |
2206 | * @adapter: the adapter | |
2207 | * @txq: pointer to the new txq to be filled in | |
 | * @dev: the network device associated with the new txq |
2208 | * @devq: the network TX queue associated with the new txq |
2209 | * @iqid: the relative ingress queue ID to which events relating to | |
2210 | * the new txq should be directed | |
2211 | */ | |
2212 | int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, | |
2213 | struct net_device *dev, struct netdev_queue *devq, | |
2214 | unsigned int iqid) | |
2215 | { | |
2216 | int ret, nentries; | |
2217 | struct fw_eq_eth_cmd cmd, rpl; | |
2218 | struct port_info *pi = netdev_priv(dev); | |
2219 | ||
2220 | /* | |
caedda35 CL |
2221 | * Calculate the size of the hardware TX Queue (including the Status |
2222 | * Page on the end of the TX Queue) in units of TX Descriptors. | |
c6e0d914 CL |
2223 | */ |
2224 | nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); | |
2225 | ||
2226 | /* | |
2227 | * Allocate the hardware ring for the TX ring (with space for its | |
2228 | * status page) along with the associated software descriptor ring. | |
2229 | */ | |
2230 | txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size, | |
2231 | sizeof(struct tx_desc), | |
2232 | sizeof(struct tx_sw_desc), | |
2233 | &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); | |
2234 | if (!txq->q.desc) | |
2235 | return -ENOMEM; | |
2236 | ||
2237 | /* | |
2238 | * Fill in the Egress Queue Command. Note: As with the direct use of | |
2239 | * the firmware Ingress Queue Command above in our RXQ allocation |
2240 | * routine, ideally, this code would be in t4vf_hw.c. Again, we'll | |
2241 | * have to see if there's some reasonable way to parameterize it | |
2242 | * into the common code ... | |
2243 | */ | |
2244 | memset(&cmd, 0, sizeof(cmd)); | |
2245 | cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) | | |
2246 | FW_CMD_REQUEST | | |
2247 | FW_CMD_WRITE | | |
2248 | FW_CMD_EXEC); | |
2249 | cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC | | |
2250 | FW_EQ_ETH_CMD_EQSTART | | |
2251 | FW_LEN16(cmd)); | |
2252 | cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid)); | |
2253 | cmd.fetchszm_to_iqid = | |
2254 | cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) | | |
2255 | FW_EQ_ETH_CMD_PCIECHN(pi->port_id) | | |
2256 | FW_EQ_ETH_CMD_IQID(iqid)); | |
2257 | cmd.dcaen_to_eqsize = | |
2258 | cpu_to_be32(FW_EQ_ETH_CMD_FBMIN(SGE_FETCHBURSTMIN_64B) | | |
2259 | FW_EQ_ETH_CMD_FBMAX(SGE_FETCHBURSTMAX_512B) | | |
2260 | FW_EQ_ETH_CMD_CIDXFTHRESH(SGE_CIDXFLUSHTHRESH_32) | | |
2261 | FW_EQ_ETH_CMD_EQSIZE(nentries)); | |
2262 | cmd.eqaddr = cpu_to_be64(txq->q.phys_addr); | |
2263 | ||
2264 | /* | |
2265 | * Issue the firmware Egress Queue Command and extract the results if | |
2266 | * it completes successfully. | |
2267 | */ | |
2268 | ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); | |
2269 | if (ret) { | |
2270 | /* | |
2271 | * The firmware Egress Queue Command failed for some reason. |
2272 | * Free up our partial allocation state and return the error. | |
2273 | */ | |
2274 | kfree(txq->q.sdesc); | |
2275 | txq->q.sdesc = NULL; | |
2276 | dma_free_coherent(adapter->pdev_dev, | |
2277 | nentries * sizeof(struct tx_desc), | |
2278 | txq->q.desc, txq->q.phys_addr); | |
2279 | txq->q.desc = NULL; | |
2280 | return ret; | |
2281 | } | |
2282 | ||
2283 | txq->q.in_use = 0; | |
2284 | txq->q.cidx = 0; | |
2285 | txq->q.pidx = 0; | |
2286 | txq->q.stat = (void *)&txq->q.desc[txq->q.size]; | |
2287 | txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_GET(be32_to_cpu(rpl.eqid_pkd)); | |
2288 | txq->q.abs_id = | |
2289 | FW_EQ_ETH_CMD_PHYSEQID_GET(be32_to_cpu(rpl.physeqid_pkd)); | |
2290 | txq->txq = devq; | |
2291 | txq->tso = 0; | |
2292 | txq->tx_cso = 0; | |
2293 | txq->vlan_ins = 0; | |
2294 | txq->q.stops = 0; | |
2295 | txq->q.restarts = 0; | |
2296 | txq->mapping_err = 0; | |
2297 | return 0; | |
2298 | } | |
2299 | ||
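/*
 * Illustrative sketch only (not part of this file): the driver's queue
 * setup path pairs the two allocators above for each "queue set", e.g.
 *
 *	err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false, dev,
 *				 msix++, &rxq->fl, t4vf_ethrx_handler);
 *	if (!err)
 *		err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
 *					     netdev_get_tx_queue(dev, qs),
 *					     s->fw_evtq.cntxt_id);
 *
 * so the RX queue's interrupt is steered to its own MSI-X vector (or to
 * the forwarded interrupt queue under MSI) and the TX queue's events are
 * directed at the firmware event queue.  The local names used here
 * (rxq, txq, msix, qs, s) are assumptions for the example.
 */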
2300 | /* | |
2301 | * Free the DMA map resources associated with a TX queue. | |
2302 | */ | |
2303 | static void free_txq(struct adapter *adapter, struct sge_txq *tq) | |
2304 | { | |
2305 | dma_free_coherent(adapter->pdev_dev, | |
2306 | tq->size * sizeof(*tq->desc) + STAT_LEN, | |
2307 | tq->desc, tq->phys_addr); | |
2308 | tq->cntxt_id = 0; | |
2309 | tq->sdesc = NULL; | |
2310 | tq->desc = NULL; | |
2311 | } | |
2312 | ||
2313 | /* | |
2314 | * Free the resources associated with a response queue (possibly including a | |
2315 | * free list). | |
2316 | */ | |
2317 | static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, | |
2318 | struct sge_fl *fl) | |
2319 | { | |
2320 | unsigned int flid = fl ? fl->cntxt_id : 0xffff; | |
2321 | ||
2322 | t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP, | |
2323 | rspq->cntxt_id, flid, 0xffff); | |
2324 | dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len, | |
2325 | rspq->desc, rspq->phys_addr); | |
2326 | netif_napi_del(&rspq->napi); | |
2327 | rspq->netdev = NULL; | |
2328 | rspq->cntxt_id = 0; | |
2329 | rspq->abs_id = 0; | |
2330 | rspq->desc = NULL; | |
2331 | ||
2332 | if (fl) { | |
2333 | free_rx_bufs(adapter, fl, fl->avail); | |
2334 | dma_free_coherent(adapter->pdev_dev, | |
2335 | fl->size * sizeof(*fl->desc) + STAT_LEN, | |
2336 | fl->desc, fl->addr); | |
2337 | kfree(fl->sdesc); | |
2338 | fl->sdesc = NULL; | |
2339 | fl->cntxt_id = 0; | |
2340 | fl->desc = NULL; | |
2341 | } | |
2342 | } | |
2343 | ||
2344 | /** | |
2345 | * t4vf_free_sge_resources - free SGE resources | |
2346 | * @adapter: the adapter | |
2347 | * | |
2348 | * Frees resources used by the SGE queue sets. | |
2349 | */ | |
2350 | void t4vf_free_sge_resources(struct adapter *adapter) | |
2351 | { | |
2352 | struct sge *s = &adapter->sge; | |
2353 | struct sge_eth_rxq *rxq = s->ethrxq; | |
2354 | struct sge_eth_txq *txq = s->ethtxq; | |
2355 | struct sge_rspq *evtq = &s->fw_evtq; | |
2356 | struct sge_rspq *intrq = &s->intrq; | |
2357 | int qs; | |
2358 | ||
b97d13a5 | 2359 | for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) { |
c6e0d914 CL |
2360 | if (rxq->rspq.desc) |
2361 | free_rspq_fl(adapter, &rxq->rspq, &rxq->fl); | |
2362 | if (txq->q.desc) { | |
2363 | t4vf_eth_eq_free(adapter, txq->q.cntxt_id); | |
2364 | free_tx_desc(adapter, &txq->q, txq->q.in_use, true); | |
2365 | kfree(txq->q.sdesc); | |
2366 | free_txq(adapter, &txq->q); | |
2367 | } | |
2368 | } | |
2369 | if (evtq->desc) | |
2370 | free_rspq_fl(adapter, evtq, NULL); | |
2371 | if (intrq->desc) | |
2372 | free_rspq_fl(adapter, intrq, NULL); | |
2373 | } | |
2374 | ||
2375 | /** | |
2376 | * t4vf_sge_start - enable SGE operation | |
2377 | * @adapter: the adapter | |
2378 | * | |
2379 | * Start tasklets and timers associated with the DMA engine. | |
2380 | */ | |
2381 | void t4vf_sge_start(struct adapter *adapter) | |
2382 | { | |
2383 | adapter->sge.ethtxq_rover = 0; | |
2384 | mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); | |
2385 | mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); | |
2386 | } | |
2387 | ||
2388 | /** | |
2389 | * t4vf_sge_stop - disable SGE operation | |
2390 | * @adapter: the adapter | |
2391 | * | |
2392 | * Stop tasklets and timers associated with the DMA engine. Note that | |
2393 | * this is effective only if measures have been taken to disable any HW | |
2394 | * events that may restart them. | |
2395 | */ | |
2396 | void t4vf_sge_stop(struct adapter *adapter) | |
2397 | { | |
2398 | struct sge *s = &adapter->sge; | |
2399 | ||
2400 | if (s->rx_timer.function) | |
2401 | del_timer_sync(&s->rx_timer); | |
2402 | if (s->tx_timer.function) | |
2403 | del_timer_sync(&s->tx_timer); | |
2404 | } | |
2405 | ||
2406 | /** | |
2407 | * t4vf_sge_init - initialize SGE | |
2408 | * @adapter: the adapter | |
2409 | * | |
2410 | * Performs SGE initialization needed every time after a chip reset. | |
2411 | * We do not initialize any of the queue sets here; instead, the top-level |
2412 | * driver must request those individually. We also do not enable DMA |
2413 | * here, that should be done after the queues have been set up. | |
2414 | */ | |
2415 | int t4vf_sge_init(struct adapter *adapter) | |
2416 | { | |
2417 | struct sge_params *sge_params = &adapter->params.sge; | |
2418 | u32 fl0 = sge_params->sge_fl_buffer_size[0]; | |
2419 | u32 fl1 = sge_params->sge_fl_buffer_size[1]; | |
2420 | struct sge *s = &adapter->sge; | |
2421 | ||
2422 | /* | |
2423 | * Start by vetting the basic SGE parameters which have been set up by | |
2424 | * the Physical Function Driver. Ideally we should be able to deal | |
2425 | * with _any_ configuration. Practice is different ... | |
2426 | */ | |
2427 | if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) { | |
2428 | dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n", | |
2429 | fl0, fl1); | |
2430 | return -EINVAL; | |
2431 | } | |
52367a76 | 2432 | if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) { |
c6e0d914 CL |
2433 | dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n"); |
2434 | return -EINVAL; | |
2435 | } | |
2436 | ||
2437 | /* | |
2438 | * Now translate the adapter parameters into our internal forms. | |
2439 | */ | |
2440 | if (fl1) | |
2441 | FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT; | |
52367a76 VP |
2442 | STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) |
2443 | ? 128 : 64); | |
c6e0d914 CL |
2444 | PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control); |
2445 | FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + | |
b3003be3 | 2446 | SGE_INGPADBOUNDARY_SHIFT); |
c6e0d914 CL |
2447 | |
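/*
 * Worked example (illustrative): with the smallest encoded ingress
 * padding boundary (0), FL_ALIGN works out to
 * 1 << SGE_INGPADBOUNDARY_SHIFT bytes (32 bytes if that shift is 5, as
 * in the common T4 register definitions), and STAT_LEN is 128 or 64
 * bytes depending on whether the PF configured the larger Egress Status
 * Page size.  The authoritative values always come from the SGE_CONTROL
 * fields decoded above.
 */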
2448 | /* | |
2449 | * Set up tasklet timers. | |
2450 | */ | |
2451 | setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter); | |
2452 | setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter); | |
2453 | ||
2454 | /* | |
2455 | * Initialize Forwarded Interrupt Queue lock. | |
2456 | */ | |
2457 | spin_lock_init(&s->intrq_lock); | |
2458 | ||
2459 | return 0; | |
2460 | } |