2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/skbuff.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/if_vlan.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/jiffies.h>
42 #include <linux/prefetch.h>
43 #include <linux/export.h>
48 #include "t4_values.h"
53 * Rx buffer size. We use largish buffers if possible but settle for single
54 * pages under memory shortage.
57 # define FL_PG_ORDER 0
59 # define FL_PG_ORDER (16 - PAGE_SHIFT)
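/* For example, with 4KB pages (PAGE_SHIFT == 12) this works out to order-4,
 * i.e. 64KB, Free List buffers, while architectures with 64KB or larger
 * pages fall back to single (order-0) pages.
 */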
62 /* RX_PULL_LEN should be <= RX_COPY_THRES */
63 #define RX_COPY_THRES 256
64 #define RX_PULL_LEN 128
67 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
68 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
70 #define RX_PKT_SKB_LEN 512
73 * Max number of Tx descriptors we clean up at a time. Should be modest as
74 * freeing skbs isn't cheap and it happens while holding locks. We just need
75 * to free packets faster than they arrive; we eventually catch up and keep
76 * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES.
78 #define MAX_TX_RECLAIM 16
81 * Max number of Rx buffers we replenish at a time. Again keep this modest,
82 * as allocating buffers isn't cheap either.
84 #define MAX_RX_REFILL 16U
87 * Period of the Rx queue check timer. This timer is infrequent as it has
88 * something to do only when the system experiences severe memory shortage.
90 #define RX_QCHECK_PERIOD (HZ / 2)
93 * Period of the Tx queue check timer.
95 #define TX_QCHECK_PERIOD (HZ / 2)
97 /* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
98 * (in RX_QCHECK_PERIOD multiples). If we find one of the SGE Ingress DMA
99 * State Machines in the same state for this amount of time (in HZ) then we'll
100 * issue a warning about a potential hang. We'll repeat the warning every
101 * N RX_QCHECK_PERIODs for as long as the SGE Ingress DMA Channel appears
102 * to be hung. If the situation clears, we'll note that as well.
104 #define SGE_IDMA_WARN_THRESH (1 * HZ)
105 #define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
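/* Worked out: the first warning fires after the state machine has sat in the
 * same state for 1 second; since RX_QCHECK_PERIOD is HZ/2, the repeat
 * interval of 20 * RX_QCHECK_PERIOD comes to roughly 10 seconds for as long
 * as the apparent hang persists.
 */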
108 * Max number of Tx descriptors to be reclaimed by the Tx timer.
110 #define MAX_TIMER_TX_RECLAIM 100
113 * Timer index used when backing off due to memory shortage.
115 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
118 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
119 * attempt to refill it.
121 #define FL_STARVE_THRES 4
124 * Suspend an Ethernet Tx queue with fewer available descriptors than this.
125 * This is the same as calc_tx_descs() for a TSO packet with
126 * nr_frags == MAX_SKB_FRAGS.
128 #define ETHTXQ_STOP_THRES \
129 (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
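/* For example, assuming the common MAX_SKB_FRAGS of 17 (4KB pages):
 * (3 * 17) / 2 + (17 & 1) = 26 flits of SGL, DIV_ROUND_UP(26, 8) = 4
 * descriptors, and the leading 1 covers the descriptor holding the WR and
 * CPL headers, giving a stop threshold of 5 descriptors.
 */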
132 * Suspension threshold for non-Ethernet Tx queues. We require enough room
133 * for a full sized WR.
135 #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
138 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
141 #define MAX_IMM_TX_PKT_LEN 128
144 * Max size of a WR sent through a control Tx queue.
146 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
148 struct tx_sw_desc { /* SW state per Tx descriptor */
150 struct ulptx_sgl *sgl;
153 struct rx_sw_desc { /* SW state per Rx descriptor */
159 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
160 * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
161 * We could easily support more but there doesn't seem to be much need for that.
164 #define FL_MTU_SMALL 1500
165 #define FL_MTU_LARGE 9000
167 static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
170 struct sge *s = &adapter->sge;
172 return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
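/* Example (assuming a typical pktshift of 2 and fl_align of 32):
 * 2 + 14 + 4 + 1500 = 1520, rounded up to 1536 bytes for the small buffer,
 * and 2 + 14 + 4 + 9000 = 9020, rounded up to 9024 bytes for the large one.
 */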
175 #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
176 #define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
179 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
180 * these to specify the buffer size as an index into the SGE Free List Buffer
181 * Size register array. We also use bit 4, when the buffer has been unmapped
182 * for DMA, but this is of course never sent to the hardware and is only used
183 * to prevent double unmappings. All of the above requires that the Free List
184 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
185 * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
186 * Free List Buffer alignment is 32 bytes, this works out for us ...
189 RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
190 RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */
191 RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
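/* For illustration, a large-page buffer mapped at bus address 0x12340000
 * (an arbitrary example value) is handed to the FL as
 * 0x12340000 | RX_LARGE_PG_BUF; get_buf_addr() below masks off the low
 * RX_BUF_FLAGS bits to recover the address and is_buf_mapped() tests
 * RX_UNMAPPED_BUF.
 */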
194 * XXX We shouldn't depend on being able to use these indices.
195 * XXX Especially when some other Master PF has initialized the
196 * XXX adapter or we use the Firmware Configuration File. We
197 * XXX should really search through the Host Buffer Size register
198 * XXX array for the appropriately sized buffer indices.
200 RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
201 RX_LARGE_PG_BUF = 0x1, /* large (FL_PG_ORDER) page buffer */
203 RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
204 RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
207 static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
208 #define MIN_NAPI_WORK 1
210 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
212 return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
215 static inline bool is_buf_mapped(const struct rx_sw_desc *d)
217 return !(d->dma_addr & RX_UNMAPPED_BUF);
221 * txq_avail - return the number of available slots in a Tx queue
224 * Returns the number of descriptors in a Tx queue available to write new network packets.
227 static inline unsigned int txq_avail(const struct sge_txq *q)
229 return q->size - 1 - q->in_use;
233 * fl_cap - return the capacity of a free-buffer list
236 * Returns the capacity of a free-buffer list. The capacity is less than
237 * the size because one descriptor needs to be left unpopulated, otherwise
238 * HW will think the FL is empty.
240 static inline unsigned int fl_cap(const struct sge_fl *fl)
242 return fl->size - 8; /* 1 descriptor = 8 buffers */
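/* e.g. an FL created with 1024 entries can be filled with at most 1016
 * buffers; the last HW descriptor's worth of entries stays unpopulated.
 */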
245 static inline bool fl_starving(const struct sge_fl *fl)
247 return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
250 static int map_skb(struct device *dev, const struct sk_buff *skb,
253 const skb_frag_t *fp, *end;
254 const struct skb_shared_info *si;
256 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
257 if (dma_mapping_error(dev, *addr))
260 si = skb_shinfo(skb);
261 end = &si->frags[si->nr_frags];
263 for (fp = si->frags; fp < end; fp++) {
264 *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
266 if (dma_mapping_error(dev, *addr))
272 while (fp-- > si->frags)
273 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
275 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
280 #ifdef CONFIG_NEED_DMA_MAP_STATE
281 static void unmap_skb(struct device *dev, const struct sk_buff *skb,
282 const dma_addr_t *addr)
284 const skb_frag_t *fp, *end;
285 const struct skb_shared_info *si;
287 dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
289 si = skb_shinfo(skb);
290 end = &si->frags[si->nr_frags];
291 for (fp = si->frags; fp < end; fp++)
292 dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
296 * deferred_unmap_destructor - unmap a packet when it is freed
299 * This is the packet destructor used for Tx packets that need to remain
300 * mapped until they are freed rather than until their Tx descriptors are
303 static void deferred_unmap_destructor(struct sk_buff *skb)
305 unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
309 static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
310 const struct ulptx_sgl *sgl, const struct sge_txq *q)
312 const struct ulptx_sge_pair *p;
313 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
315 if (likely(skb_headlen(skb)))
316 dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
319 dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
325 * the complexity below is because of the possibility of a wrap-around
326 * in the middle of an SGL
328 for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
329 if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
330 unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
331 ntohl(p->len[0]), DMA_TO_DEVICE);
332 dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
333 ntohl(p->len[1]), DMA_TO_DEVICE);
335 } else if ((u8 *)p == (u8 *)q->stat) {
336 p = (const struct ulptx_sge_pair *)q->desc;
338 } else if ((u8 *)p + 8 == (u8 *)q->stat) {
339 const __be64 *addr = (const __be64 *)q->desc;
341 dma_unmap_page(dev, be64_to_cpu(addr[0]),
342 ntohl(p->len[0]), DMA_TO_DEVICE);
343 dma_unmap_page(dev, be64_to_cpu(addr[1]),
344 ntohl(p->len[1]), DMA_TO_DEVICE);
345 p = (const struct ulptx_sge_pair *)&addr[2];
347 const __be64 *addr = (const __be64 *)q->desc;
349 dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
350 ntohl(p->len[0]), DMA_TO_DEVICE);
351 dma_unmap_page(dev, be64_to_cpu(addr[0]),
352 ntohl(p->len[1]), DMA_TO_DEVICE);
353 p = (const struct ulptx_sge_pair *)&addr[1];
359 if ((u8 *)p == (u8 *)q->stat)
360 p = (const struct ulptx_sge_pair *)q->desc;
361 addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
362 *(const __be64 *)q->desc;
363 dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
369 * free_tx_desc - reclaims Tx descriptors and their buffers
370 * @adapter: the adapter
371 * @q: the Tx queue to reclaim descriptors from
372 * @n: the number of descriptors to reclaim
373 * @unmap: whether the buffers should be unmapped for DMA
375 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
376 * Tx buffers. Called with the Tx queue lock held.
378 static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
379 unsigned int n, bool unmap)
381 struct tx_sw_desc *d;
382 unsigned int cidx = q->cidx;
383 struct device *dev = adap->pdev_dev;
387 if (d->skb) { /* an SGL is present */
389 unmap_sgl(dev, d->skb, d->sgl, q);
390 dev_consume_skb_any(d->skb);
394 if (++cidx == q->size) {
403 * Return the number of reclaimable descriptors in a Tx queue.
405 static inline int reclaimable(const struct sge_txq *q)
407 int hw_cidx = ntohs(q->stat->cidx);
408 hw_cidx -= q->cidx;
409 return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
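/* e.g. with q->size == 1024, a HW cidx of 5 and a SW cidx of 1020 give
 * 5 - 1020 = -1015, which wraps to -1015 + 1024 = 9 reclaimable descriptors.
 */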
413 * reclaim_completed_tx - reclaims completed Tx descriptors
415 * @q: the Tx queue to reclaim completed descriptors from
416 * @unmap: whether the buffers should be unmapped for DMA
418 * Reclaims Tx descriptors that the SGE has indicated it has processed,
419 * and frees the associated buffers if possible. Called with the Tx
422 static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
425 int avail = reclaimable(q);
429 * Limit the amount of clean up work we do at a time to keep
430 * the Tx lock hold time O(1).
432 if (avail > MAX_TX_RECLAIM)
433 avail = MAX_TX_RECLAIM;
435 free_tx_desc(adap, q, avail, unmap);
440 static inline int get_buf_size(struct adapter *adapter,
441 const struct rx_sw_desc *d)
443 struct sge *s = &adapter->sge;
444 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
447 switch (rx_buf_size_idx) {
448 case RX_SMALL_PG_BUF:
449 buf_size = PAGE_SIZE;
452 case RX_LARGE_PG_BUF:
453 buf_size = PAGE_SIZE << s->fl_pg_order;
456 case RX_SMALL_MTU_BUF:
457 buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
460 case RX_LARGE_MTU_BUF:
461 buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
472 * free_rx_bufs - free the Rx buffers on an SGE free list
474 * @q: the SGE free list to free buffers from
475 * @n: how many buffers to free
477 * Release the next @n buffers on an SGE free-buffer Rx queue. The
478 * buffers must be made inaccessible to HW before calling this function.
480 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
483 struct rx_sw_desc *d = &q->sdesc[q->cidx];
485 if (is_buf_mapped(d))
486 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
487 get_buf_size(adap, d),
491 if (++q->cidx == q->size)
498 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
500 * @q: the SGE free list
502 * Unmap the current buffer on an SGE free-buffer Rx queue. The
503 * buffer must be made inaccessible to HW before calling this function.
505 * This is similar to @free_rx_bufs above but does not free the buffer.
506 * Do note that the FL still loses any further access to the buffer.
508 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
510 struct rx_sw_desc *d = &q->sdesc[q->cidx];
512 if (is_buf_mapped(d))
513 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
514 get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
516 if (++q->cidx == q->size)
521 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
524 if (q->pend_cred >= 8) {
525 if (is_t4(adap->params.chip))
526 val = PIDX_V(q->pend_cred / 8);
528 val = PIDX_T5_V(q->pend_cred / 8) |
533 /* If we don't have access to the new User Doorbell (T5+), use
534 * the old doorbell mechanism; otherwise use the new BAR2
537 if (unlikely(q->bar2_addr == NULL)) {
538 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
539 val | QID_V(q->cntxt_id));
541 writel(val | QID_V(q->bar2_qid),
542 q->bar2_addr + SGE_UDB_KDOORBELL);
544 /* This Write memory Barrier will force the write to
545 * the User Doorbell area to be flushed.
553 static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
557 sd->dma_addr = mapping; /* includes size low bits */
561 * refill_fl - refill an SGE Rx buffer ring
563 * @q: the ring to refill
564 * @n: the number of new buffers to allocate
565 * @gfp: the gfp flags for the allocations
567 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
568 * allocated with the supplied gfp flags. The caller must assure that
569 * @n does not exceed the queue's capacity. If afterwards the queue is
570 * found critically low, mark it as starving in the bitmap of starving FLs.
572 * Returns the number of buffers allocated.
574 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
577 struct sge *s = &adap->sge;
580 unsigned int cred = q->avail;
581 __be64 *d = &q->desc[q->pidx];
582 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
586 if (s->fl_pg_order == 0)
587 goto alloc_small_pages;
590 * Prefer large buffers
593 pg = __dev_alloc_pages(gfp, s->fl_pg_order);
595 q->large_alloc_failed++;
596 break; /* fall back to single pages */
599 mapping = dma_map_page(adap->pdev_dev, pg, 0,
600 PAGE_SIZE << s->fl_pg_order,
602 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
603 __free_pages(pg, s->fl_pg_order);
604 goto out; /* do not try small pages for this error */
606 mapping |= RX_LARGE_PG_BUF;
607 *d++ = cpu_to_be64(mapping);
609 set_rx_sw_desc(sd, pg, mapping);
613 if (++q->pidx == q->size) {
623 pg = __dev_alloc_page(gfp);
629 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
631 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
635 *d++ = cpu_to_be64(mapping);
637 set_rx_sw_desc(sd, pg, mapping);
641 if (++q->pidx == q->size) {
648 out: cred = q->avail - cred;
649 q->pend_cred += cred;
652 if (unlikely(fl_starving(q))) {
654 set_bit(q->cntxt_id - adap->sge.egr_start,
655 adap->sge.starving_fl);
661 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
663 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
668 * alloc_ring - allocate resources for an SGE descriptor ring
669 * @dev: the PCI device's core device
670 * @nelem: the number of descriptors
671 * @elem_size: the size of each descriptor
672 * @sw_size: the size of the SW state associated with each ring element
673 * @phys: the physical address of the allocated ring
674 * @metadata: address of the array holding the SW state for the ring
675 * @stat_size: extra space in HW ring for status information
676 * @node: preferred node for memory allocations
678 * Allocates resources for an SGE descriptor ring, such as Tx queues,
679 * free buffer lists, or response queues. Each SGE ring requires
680 * space for its HW descriptors plus, optionally, space for the SW state
681 * associated with each HW entry (the metadata). The function returns
682 * three values: the virtual address for the HW ring (the return value
683 * of the function), the bus address of the HW ring, and the address of the SW ring.
686 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
687 size_t sw_size, dma_addr_t *phys, void *metadata,
688 size_t stat_size, int node)
690 size_t len = nelem * elem_size + stat_size;
692 void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
697 s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
700 dma_free_coherent(dev, len, p, *phys);
705 *(void **)metadata = s;
711 * sgl_len - calculates the size of an SGL of the given capacity
712 * @n: the number of SGL entries
714 * Calculates the number of flits needed for a scatter/gather list that
715 * can hold the given number of entries.
717 static inline unsigned int sgl_len(unsigned int n)
719 n--;
720 return (3 * n) / 2 + (n & 1) + 2;
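/* e.g. sgl_len(3) = 5 flits: one flit for the ULPTX header and len0, one
 * for addr0, and three for the single ulptx_sge_pair holding the two
 * remaining addresses and lengths.
 */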
724 * flits_to_desc - returns the num of Tx descriptors for the given flits
725 * @n: the number of flits
727 * Returns the number of Tx descriptors needed for the supplied number
730 static inline unsigned int flits_to_desc(unsigned int n)
732 BUG_ON(n > SGE_MAX_WR_LEN / 8);
733 return DIV_ROUND_UP(n, 8);
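/* Each Tx descriptor holds 8 flits (64 bytes), so e.g. a 34-flit TSO work
 * request occupies DIV_ROUND_UP(34, 8) = 5 descriptors.
 */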
737 * is_eth_imm - can an Ethernet packet be sent as immediate data?
740 * Returns whether an Ethernet packet is small enough to fit as
741 * immediate data. Return value corresponds to headroom required.
743 static inline int is_eth_imm(const struct sk_buff *skb)
745 int hdrlen = skb_shinfo(skb)->gso_size ?
746 sizeof(struct cpl_tx_pkt_lso_core) : 0;
748 hdrlen += sizeof(struct cpl_tx_pkt);
749 if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
755 * calc_tx_flits - calculate the number of flits for a packet Tx WR
758 * Returns the number of flits needed for a Tx WR for the given Ethernet
759 * packet, including the needed WR and CPL headers.
761 static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
764 int hdrlen = is_eth_imm(skb);
767 return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
769 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
770 if (skb_shinfo(skb)->gso_size)
776 * calc_tx_descs - calculate the number of Tx descriptors for a packet
779 * Returns the number of Tx descriptors needed for the given Ethernet
780 * packet, including the needed WR and CPL headers.
782 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
784 return flits_to_desc(calc_tx_flits(skb));
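/* e.g. a non-TSO packet with two page fragments that is too large to be
 * inlined needs sgl_len(2 + 1) + 4 = 9 flits (the 4 covers the
 * fw_eth_tx_pkt_wr and cpl_tx_pkt_core headers), i.e. 2 descriptors;
 * a TSO packet adds 2 more flits for the LSO CPL.
 */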
788 * write_sgl - populate a scatter/gather list for a packet
790 * @q: the Tx queue we are writing into
791 * @sgl: starting location for writing the SGL
792 * @end: points right after the end of the SGL
793 * @start: start offset into skb main-body data to include in the SGL
794 * @addr: the list of bus addresses for the SGL elements
796 * Generates a gather list for the buffers that make up a packet.
797 * The caller must provide adequate space for the SGL that will be written.
798 * The SGL includes all of the packet's page fragments and the data in its
799 * main body except for the first @start bytes. @sgl must be 16-byte
800 * aligned and within a Tx descriptor with available space. @end points
801 * right after the end of the SGL but does not account for any potential
802 * wrap around, i.e., @end > @sgl.
804 static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
805 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
806 const dma_addr_t *addr)
809 struct ulptx_sge_pair *to;
810 const struct skb_shared_info *si = skb_shinfo(skb);
811 unsigned int nfrags = si->nr_frags;
812 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
814 len = skb_headlen(skb) - start;
816 sgl->len0 = htonl(len);
817 sgl->addr0 = cpu_to_be64(addr[0] + start);
820 sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
821 sgl->addr0 = cpu_to_be64(addr[1]);
824 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
825 ULPTX_NSGE_V(nfrags));
826 if (likely(--nfrags == 0))
829 * Most of the complexity below deals with the possibility we hit the
830 * end of the queue in the middle of writing the SGL. For this case
831 * only we create the SGL in a temporary buffer and then copy it.
833 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
835 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
836 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
837 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
838 to->addr[0] = cpu_to_be64(addr[i]);
839 to->addr[1] = cpu_to_be64(addr[++i]);
842 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
843 to->len[1] = cpu_to_be32(0);
844 to->addr[0] = cpu_to_be64(addr[i + 1]);
846 if (unlikely((u8 *)end > (u8 *)q->stat)) {
847 unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
850 memcpy(sgl->sge, buf, part0);
851 part1 = (u8 *)end - (u8 *)q->stat;
852 memcpy(q->desc, (u8 *)buf + part0, part1);
853 end = (void *)q->desc + part1;
855 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
859 /* This function copies a 64-byte coalesced work request to
860 * memory-mapped BAR2 space. For coalesced WRs the SGE fetches
861 * data from the FIFO instead of from host memory.
863 static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
876 * ring_tx_db - check and potentially ring a Tx queue's doorbell
879 * @n: number of new descriptors to give to HW
881 * Ring the doorbell for a Tx queue.
883 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
885 wmb(); /* write descriptors before telling HW */
887 /* If we don't have access to the new User Doorbell (T5+), use the old
888 * doorbell mechanism; otherwise use the new BAR2 mechanism.
890 if (unlikely(q->bar2_addr == NULL)) {
894 /* For T4 we need to participate in the Doorbell Recovery
897 spin_lock_irqsave(&q->db_lock, flags);
899 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
900 QID_V(q->cntxt_id) | val);
903 q->db_pidx = q->pidx;
904 spin_unlock_irqrestore(&q->db_lock, flags);
906 u32 val = PIDX_T5_V(n);
908 /* T4 and later chips share the same PIDX field offset within
909 * the doorbell, but T5 and later shrank the field in order to
910 * gain a bit for Doorbell Priority. The field was absurdly
911 * large in the first place (14 bits) so we just use the T5
912 * and later limits and warn if a PIDX increment is too large.
914 WARN_ON(val & DBPRIO_F);
916 /* If we're only writing a single TX Descriptor and we can use
917 * Inferred QID registers, we can use the Write Combining
918 * Gather Buffer; otherwise we use the simple doorbell.
920 if (n == 1 && q->bar2_qid == 0) {
924 u64 *wr = (u64 *)&q->desc[index];
926 cxgb_pio_copy((u64 __iomem *)
927 (q->bar2_addr + SGE_UDB_WCDOORBELL),
930 writel(val | QID_V(q->bar2_qid),
931 q->bar2_addr + SGE_UDB_KDOORBELL);
934 /* This Write Memory Barrier will force the write to the User
935 * Doorbell area to be flushed. This is needed to prevent
936 * writes on different CPUs for the same queue from hitting
937 * the adapter out of order. This is required when some Work
938 * Requests take the Write Combine Gather Buffer path (user
939 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
940 * take the traditional path where we simply increment the
941 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
942 * hardware DMA read the actual Work Request.
949 * inline_tx_skb - inline a packet's data into Tx descriptors
951 * @q: the Tx queue where the packet will be inlined
952 * @pos: starting position in the Tx queue where to inline the packet
954 * Inline a packet's contents directly into Tx descriptors, starting at
955 * the given position within the Tx DMA ring.
956 * Most of the complexity of this operation is dealing with wrap arounds
957 * in the middle of the packet we want to inline.
959 static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
963 int left = (void *)q->stat - pos;
965 if (likely(skb->len <= left)) {
966 if (likely(!skb->data_len))
967 skb_copy_from_linear_data(skb, pos, skb->len);
969 skb_copy_bits(skb, 0, pos, skb->len);
972 skb_copy_bits(skb, 0, pos, left);
973 skb_copy_bits(skb, left, q->desc, skb->len - left);
974 pos = (void *)q->desc + (skb->len - left);
977 /* 0-pad to multiple of 16 */
978 p = PTR_ALIGN(pos, 8);
979 if ((uintptr_t)p & 8)
984 * Figure out what HW csum a packet wants and return the appropriate control bits.
987 static u64 hwcsum(const struct sk_buff *skb)
990 const struct iphdr *iph = ip_hdr(skb);
992 if (iph->version == 4) {
993 if (iph->protocol == IPPROTO_TCP)
994 csum_type = TX_CSUM_TCPIP;
995 else if (iph->protocol == IPPROTO_UDP)
996 csum_type = TX_CSUM_UDPIP;
999 * unknown protocol, disable HW csum
1000 * and hope a bad packet is detected
1002 return TXPKT_L4CSUM_DIS;
1006 * this doesn't work with extension headers
1008 const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
1010 if (ip6h->nexthdr == IPPROTO_TCP)
1011 csum_type = TX_CSUM_TCPIP6;
1012 else if (ip6h->nexthdr == IPPROTO_UDP)
1013 csum_type = TX_CSUM_UDPIP6;
1018 if (likely(csum_type >= TX_CSUM_TCPIP))
1019 return TXPKT_CSUM_TYPE(csum_type) |
1020 TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
1021 TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
1023 int start = skb_transport_offset(skb);
1025 return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
1026 TXPKT_CSUM_LOC(start + skb->csum_offset);
1030 static void eth_txq_stop(struct sge_eth_txq *q)
1032 netif_tx_stop_queue(q->txq);
1036 static inline void txq_advance(struct sge_txq *q, unsigned int n)
1040 if (q->pidx >= q->size)
1045 * t4_eth_xmit - add a packet to an Ethernet Tx queue
1047 * @dev: the egress net device
1049 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
1051 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1057 unsigned int flits, ndesc;
1058 struct adapter *adap;
1059 struct sge_eth_txq *q;
1060 const struct port_info *pi;
1061 struct fw_eth_tx_pkt_wr *wr;
1062 struct cpl_tx_pkt_core *cpl;
1063 const struct skb_shared_info *ssi;
1064 dma_addr_t addr[MAX_SKB_FRAGS + 1];
1065 bool immediate = false;
1068 * The chip min packet length is 10 octets but play safe and reject
1069 * anything shorter than an Ethernet header.
1071 if (unlikely(skb->len < ETH_HLEN)) {
1072 out_free: dev_kfree_skb_any(skb);
1073 return NETDEV_TX_OK;
1076 pi = netdev_priv(dev);
1078 qidx = skb_get_queue_mapping(skb);
1079 q = &adap->sge.ethtxq[qidx + pi->first_qset];
1081 reclaim_completed_tx(adap, &q->q, true);
1083 flits = calc_tx_flits(skb);
1084 ndesc = flits_to_desc(flits);
1085 credits = txq_avail(&q->q) - ndesc;
1087 if (unlikely(credits < 0)) {
1089 dev_err(adap->pdev_dev,
1090 "%s: Tx ring %u full while queue awake!\n",
1092 return NETDEV_TX_BUSY;
1095 if (is_eth_imm(skb))
1099 unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
1104 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1105 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1107 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1110 wr = (void *)&q->q.desc[q->q.pidx];
1111 wr->equiq_to_len16 = htonl(wr_mid);
1112 wr->r3 = cpu_to_be64(0);
1113 end = (u64 *)wr + flits;
1115 len = immediate ? skb->len : 0;
1116 ssi = skb_shinfo(skb);
1117 if (ssi->gso_size) {
1118 struct cpl_tx_pkt_lso *lso = (void *)wr;
1119 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1120 int l3hdr_len = skb_network_header_len(skb);
1121 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1123 len += sizeof(*lso);
1124 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1125 FW_WR_IMMDLEN_V(len));
1126 lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
1127 LSO_FIRST_SLICE | LSO_LAST_SLICE |
1129 LSO_ETHHDR_LEN(eth_xtra_len / 4) |
1130 LSO_IPHDR_LEN(l3hdr_len / 4) |
1131 LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
1132 lso->c.ipid_ofst = htons(0);
1133 lso->c.mss = htons(ssi->gso_size);
1134 lso->c.seqno_offset = htonl(0);
1135 if (is_t4(adap->params.chip))
1136 lso->c.len = htonl(skb->len);
1138 lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len));
1139 cpl = (void *)(lso + 1);
1140 cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1141 TXPKT_IPHDR_LEN(l3hdr_len) |
1142 TXPKT_ETHHDR_LEN(eth_xtra_len);
1144 q->tx_cso += ssi->gso_segs;
1146 len += sizeof(*cpl);
1147 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1148 FW_WR_IMMDLEN_V(len));
1149 cpl = (void *)(wr + 1);
1150 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1151 cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
1154 cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
1157 if (skb_vlan_tag_present(skb)) {
1159 cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
1162 cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
1163 TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
1164 cpl->pack = htons(0);
1165 cpl->len = htons(skb->len);
1166 cpl->ctrl1 = cpu_to_be64(cntrl);
1169 inline_tx_skb(skb, &q->q, cpl + 1);
1170 dev_consume_skb_any(skb);
1174 write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
1178 last_desc = q->q.pidx + ndesc - 1;
1179 if (last_desc >= q->q.size)
1180 last_desc -= q->q.size;
1181 q->q.sdesc[last_desc].skb = skb;
1182 q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
1185 txq_advance(&q->q, ndesc);
1187 ring_tx_db(adap, &q->q, ndesc);
1188 return NETDEV_TX_OK;
1192 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1193 * @q: the SGE control Tx queue
1195 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1196 * that send only immediate data (presently just the control queues) and
1197 * thus do not have any sk_buffs to release.
1199 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1201 int hw_cidx = ntohs(q->stat->cidx);
1202 int reclaim = hw_cidx - q->cidx;
1207 q->in_use -= reclaim;
1212 * is_imm - check whether a packet can be sent as immediate data
1215 * Returns true if a packet can be sent as a WR with immediate data.
1217 static inline int is_imm(const struct sk_buff *skb)
1219 return skb->len <= MAX_CTRL_WR_LEN;
1223 * ctrlq_check_stop - check if a control queue is full and should stop
1225 * @wr: most recent WR written to the queue
1227 * Check if a control queue has become full and should be stopped.
1228 * We clean up control queue descriptors very lazily, only when we are out.
1229 * If the queue is still full after reclaiming any completed descriptors
1230 * we suspend it and have the last WR wake it up.
1232 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
1234 reclaim_completed_tx_imm(&q->q);
1235 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1236 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
1243 * ctrl_xmit - send a packet through an SGE control Tx queue
1244 * @q: the control queue
1247 * Send a packet through an SGE control Tx queue. Packets sent through
1248 * a control queue must fit entirely as immediate data.
1250 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
1253 struct fw_wr_hdr *wr;
1255 if (unlikely(!is_imm(skb))) {
1258 return NET_XMIT_DROP;
1261 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
1262 spin_lock(&q->sendq.lock);
1264 if (unlikely(q->full)) {
1265 skb->priority = ndesc; /* save for restart */
1266 __skb_queue_tail(&q->sendq, skb);
1267 spin_unlock(&q->sendq.lock);
1271 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1272 inline_tx_skb(skb, &q->q, wr);
1274 txq_advance(&q->q, ndesc);
1275 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
1276 ctrlq_check_stop(q, wr);
1278 ring_tx_db(q->adap, &q->q, ndesc);
1279 spin_unlock(&q->sendq.lock);
1282 return NET_XMIT_SUCCESS;
1286 * restart_ctrlq - restart a suspended control queue
1287 * @data: the control queue to restart
1289 * Resumes transmission on a suspended Tx control queue.
1291 static void restart_ctrlq(unsigned long data)
1293 struct sk_buff *skb;
1294 unsigned int written = 0;
1295 struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
1297 spin_lock(&q->sendq.lock);
1298 reclaim_completed_tx_imm(&q->q);
1299 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
1301 while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
1302 struct fw_wr_hdr *wr;
1303 unsigned int ndesc = skb->priority; /* previously saved */
1306 * Write descriptors and free skbs outside the lock to limit
1307 * wait times. q->full is still set so new skbs will be queued.
1309 spin_unlock(&q->sendq.lock);
1311 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1312 inline_tx_skb(skb, &q->q, wr);
1316 txq_advance(&q->q, ndesc);
1317 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1318 unsigned long old = q->q.stops;
1320 ctrlq_check_stop(q, wr);
1321 if (q->q.stops != old) { /* suspended anew */
1322 spin_lock(&q->sendq.lock);
1327 ring_tx_db(q->adap, &q->q, written);
1330 spin_lock(&q->sendq.lock);
1333 ringdb: if (written)
1334 ring_tx_db(q->adap, &q->q, written);
1335 spin_unlock(&q->sendq.lock);
1339 * t4_mgmt_tx - send a management message
1340 * @adap: the adapter
1341 * @skb: the packet containing the management message
1343 * Send a management message through control queue 0.
1345 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1350 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
1356 * is_ofld_imm - check whether a packet can be sent as immediate data
1359 * Returns true if a packet can be sent as an offload WR with immediate
1360 * data. We currently use the same limit as for Ethernet packets.
1362 static inline int is_ofld_imm(const struct sk_buff *skb)
1364 return skb->len <= MAX_IMM_TX_PKT_LEN;
1368 * calc_tx_flits_ofld - calculate # of flits for an offload packet
1371 * Returns the number of flits needed for the given offload packet.
1372 * These packets are already fully constructed and no additional headers
1375 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
1377 unsigned int flits, cnt;
1379 if (is_ofld_imm(skb))
1380 return DIV_ROUND_UP(skb->len, 8);
1382 flits = skb_transport_offset(skb) / 8U; /* headers */
1383 cnt = skb_shinfo(skb)->nr_frags;
1384 if (skb_tail_pointer(skb) != skb_transport_header(skb))
1386 return flits + sgl_len(cnt);
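/* e.g. an offload WR whose immediate headers occupy 40 bytes, followed by
 * three page fragments and no trailing linear data, needs 40 / 8 +
 * sgl_len(3) = 5 + 5 = 10 flits.
 */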
1390 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
1392 * @q: the queue to stop
1394 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
1395 * inability to map packets. A periodic timer attempts to restart queues so marked.
1398 static void txq_stop_maperr(struct sge_ofld_txq *q)
1402 set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
1403 q->adap->sge.txq_maperr);
1407 * ofldtxq_stop - stop an offload Tx queue that has become full
1408 * @q: the queue to stop
1409 * @skb: the packet causing the queue to become full
1411 * Stops an offload Tx queue that has become full and modifies the packet
1412 * being written to request a wakeup.
1414 static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
1416 struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
1418 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
1424 * service_ofldq - restart a suspended offload queue
1425 * @q: the offload queue
1427 * Services an offload Tx queue by moving packets from its packet queue
1428 * to the HW Tx ring. The function starts and ends with the queue locked.
1430 static void service_ofldq(struct sge_ofld_txq *q)
1434 struct sk_buff *skb;
1435 unsigned int written = 0;
1436 unsigned int flits, ndesc;
1438 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
1440 * We drop the lock but leave skb on sendq, thus retaining
1441 * exclusive access to the state of the queue.
1443 spin_unlock(&q->sendq.lock);
1445 reclaim_completed_tx(q->adap, &q->q, false);
1447 flits = skb->priority; /* previously saved */
1448 ndesc = flits_to_desc(flits);
1449 credits = txq_avail(&q->q) - ndesc;
1450 BUG_ON(credits < 0);
1451 if (unlikely(credits < TXQ_STOP_THRES))
1452 ofldtxq_stop(q, skb);
1454 pos = (u64 *)&q->q.desc[q->q.pidx];
1455 if (is_ofld_imm(skb))
1456 inline_tx_skb(skb, &q->q, pos);
1457 else if (map_skb(q->adap->pdev_dev, skb,
1458 (dma_addr_t *)skb->head)) {
1460 spin_lock(&q->sendq.lock);
1463 int last_desc, hdr_len = skb_transport_offset(skb);
1465 memcpy(pos, skb->data, hdr_len);
1466 write_sgl(skb, &q->q, (void *)pos + hdr_len,
1467 pos + flits, hdr_len,
1468 (dma_addr_t *)skb->head);
1469 #ifdef CONFIG_NEED_DMA_MAP_STATE
1470 skb->dev = q->adap->port[0];
1471 skb->destructor = deferred_unmap_destructor;
1473 last_desc = q->q.pidx + ndesc - 1;
1474 if (last_desc >= q->q.size)
1475 last_desc -= q->q.size;
1476 q->q.sdesc[last_desc].skb = skb;
1479 txq_advance(&q->q, ndesc);
1481 if (unlikely(written > 32)) {
1482 ring_tx_db(q->adap, &q->q, written);
1486 spin_lock(&q->sendq.lock);
1487 __skb_unlink(skb, &q->sendq);
1488 if (is_ofld_imm(skb))
1491 if (likely(written))
1492 ring_tx_db(q->adap, &q->q, written);
1496 * ofld_xmit - send a packet through an offload queue
1497 * @q: the Tx offload queue
1500 * Send an offload packet through an SGE offload queue.
1502 static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
1504 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
1505 spin_lock(&q->sendq.lock);
1506 __skb_queue_tail(&q->sendq, skb);
1507 if (q->sendq.qlen == 1)
1509 spin_unlock(&q->sendq.lock);
1510 return NET_XMIT_SUCCESS;
1514 * restart_ofldq - restart a suspended offload queue
1515 * @data: the offload queue to restart
1517 * Resumes transmission on a suspended Tx offload queue.
1519 static void restart_ofldq(unsigned long data)
1521 struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
1523 spin_lock(&q->sendq.lock);
1524 q->full = 0; /* the queue actually is completely empty now */
1526 spin_unlock(&q->sendq.lock);
1530 * skb_txq - return the Tx queue an offload packet should use
1533 * Returns the Tx queue an offload packet should use as indicated by bits
1534 * 1-15 in the packet's queue_mapping.
1536 static inline unsigned int skb_txq(const struct sk_buff *skb)
1538 return skb->queue_mapping >> 1;
1542 * is_ctrl_pkt - return whether an offload packet is a control packet
1545 * Returns whether an offload packet should use an OFLD or a CTRL
1546 * Tx queue as indicated by bit 0 in the packet's queue_mapping.
1548 static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
1550 return skb->queue_mapping & 1;
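/* e.g. a queue_mapping of 4 (binary 100) selects OFLD Tx queue 2, while a
 * queue_mapping of 1 sends the packet to control queue 0 (bit 0 set).
 */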
1553 static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
1555 unsigned int idx = skb_txq(skb);
1557 if (unlikely(is_ctrl_pkt(skb))) {
1558 /* Single ctrl queue is a requirement for LE workaround path */
1559 if (adap->tids.nsftids)
1561 return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
1563 return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
1567 * t4_ofld_send - send an offload packet
1568 * @adap: the adapter
1571 * Sends an offload packet. We use the packet queue_mapping to select the
1572 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1573 * should be sent as regular or control, bits 1-15 select the queue.
1575 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
1580 ret = ofld_send(adap, skb);
1586 * cxgb4_ofld_send - send an offload packet
1587 * @dev: the net device
1590 * Sends an offload packet. This is an exported version of @t4_ofld_send,
1591 * intended for ULDs.
1593 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
1595 return t4_ofld_send(netdev2adap(dev), skb);
1597 EXPORT_SYMBOL(cxgb4_ofld_send);
1599 static inline void copy_frags(struct sk_buff *skb,
1600 const struct pkt_gl *gl, unsigned int offset)
1604 /* usually there's just one frag */
1605 __skb_fill_page_desc(skb, 0, gl->frags[0].page,
1606 gl->frags[0].offset + offset,
1607 gl->frags[0].size - offset);
1608 skb_shinfo(skb)->nr_frags = gl->nfrags;
1609 for (i = 1; i < gl->nfrags; i++)
1610 __skb_fill_page_desc(skb, i, gl->frags[i].page,
1611 gl->frags[i].offset,
1614 /* get a reference to the last page, we don't own it */
1615 get_page(gl->frags[gl->nfrags - 1].page);
1619 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
1620 * @gl: the gather list
1621 * @skb_len: size of sk_buff main body if it carries fragments
1622 * @pull_len: amount of data to move to the sk_buff's main body
1624 * Builds an sk_buff from the given packet gather list. Returns the
1625 * sk_buff or %NULL if sk_buff allocation failed.
1627 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
1628 unsigned int skb_len, unsigned int pull_len)
1630 struct sk_buff *skb;
1633 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
1634 * size, which is expected since buffers are at least PAGE_SIZEd.
1635 * In this case packets up to RX_COPY_THRES have only one fragment.
1637 if (gl->tot_len <= RX_COPY_THRES) {
1638 skb = dev_alloc_skb(gl->tot_len);
1641 __skb_put(skb, gl->tot_len);
1642 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1644 skb = dev_alloc_skb(skb_len);
1647 __skb_put(skb, pull_len);
1648 skb_copy_to_linear_data(skb, gl->va, pull_len);
1650 copy_frags(skb, gl, pull_len);
1651 skb->len = gl->tot_len;
1652 skb->data_len = skb->len - pull_len;
1653 skb->truesize += skb->data_len;
1657 EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
1660 * t4_pktgl_free - free a packet gather list
1661 * @gl: the gather list
1663 * Releases the pages of a packet gather list. We do not own the last
1664 * page on the list and do not free it.
1666 static void t4_pktgl_free(const struct pkt_gl *gl)
1669 const struct page_frag *p;
1671 for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1676 * Process an MPS trace packet. Give it an unused protocol number so it won't
1677 * be delivered to anyone and send it to the stack for capture.
1679 static noinline int handle_trace_pkt(struct adapter *adap,
1680 const struct pkt_gl *gl)
1682 struct sk_buff *skb;
1684 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1685 if (unlikely(!skb)) {
1690 if (is_t4(adap->params.chip))
1691 __skb_pull(skb, sizeof(struct cpl_trace_pkt));
1693 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
1695 skb_reset_mac_header(skb);
1696 skb->protocol = htons(0xffff);
1697 skb->dev = adap->port[0];
1698 netif_receive_skb(skb);
1702 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1703 const struct cpl_rx_pkt *pkt)
1705 struct adapter *adapter = rxq->rspq.adap;
1706 struct sge *s = &adapter->sge;
1708 struct sk_buff *skb;
1710 skb = napi_get_frags(&rxq->rspq.napi);
1711 if (unlikely(!skb)) {
1713 rxq->stats.rx_drops++;
1717 copy_frags(skb, gl, s->pktshift);
1718 skb->len = gl->tot_len - s->pktshift;
1719 skb->data_len = skb->len;
1720 skb->truesize += skb->data_len;
1721 skb->ip_summed = CHECKSUM_UNNECESSARY;
1722 skb_record_rx_queue(skb, rxq->rspq.idx);
1723 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
1724 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
1727 if (unlikely(pkt->vlan_ex)) {
1728 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1729 rxq->stats.vlan_ex++;
1731 ret = napi_gro_frags(&rxq->rspq.napi);
1732 if (ret == GRO_HELD)
1733 rxq->stats.lro_pkts++;
1734 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1735 rxq->stats.lro_merged++;
1737 rxq->stats.rx_cso++;
1741 * t4_ethrx_handler - process an ingress ethernet packet
1742 * @q: the response queue that received the packet
1743 * @rsp: the response queue descriptor holding the RX_PKT message
1744 * @si: the gather list of packet fragments
1746 * Process an ingress ethernet packet and deliver it to the stack.
1748 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1749 const struct pkt_gl *si)
1752 struct sk_buff *skb;
1753 const struct cpl_rx_pkt *pkt;
1754 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1755 struct sge *s = &q->adap->sge;
1756 int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
1757 CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
1759 if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
1760 return handle_trace_pkt(q->adap, si);
1762 pkt = (const struct cpl_rx_pkt *)rsp;
1763 csum_ok = pkt->csum_calc && !pkt->err_vec &&
1764 (q->netdev->features & NETIF_F_RXCSUM);
1765 if ((pkt->l2info & htonl(RXF_TCP_F)) &&
1766 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
1767 do_gro(rxq, si, pkt);
1771 skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
1772 if (unlikely(!skb)) {
1774 rxq->stats.rx_drops++;
1778 __skb_pull(skb, s->pktshift); /* remove ethernet header padding */
1779 skb->protocol = eth_type_trans(skb, q->netdev);
1780 skb_record_rx_queue(skb, q->idx);
1781 if (skb->dev->features & NETIF_F_RXHASH)
1782 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
1787 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
1788 if (!pkt->ip_frag) {
1789 skb->ip_summed = CHECKSUM_UNNECESSARY;
1790 rxq->stats.rx_cso++;
1791 } else if (pkt->l2info & htonl(RXF_IP_F)) {
1792 __sum16 c = (__force __sum16)pkt->csum;
1793 skb->csum = csum_unfold(c);
1794 skb->ip_summed = CHECKSUM_COMPLETE;
1795 rxq->stats.rx_cso++;
1798 skb_checksum_none_assert(skb);
1800 if (unlikely(pkt->vlan_ex)) {
1801 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1802 rxq->stats.vlan_ex++;
1804 netif_receive_skb(skb);
1809 * restore_rx_bufs - put back a packet's Rx buffers
1810 * @si: the packet gather list
1811 * @q: the SGE free list
1812 * @frags: number of FL buffers to restore
1814 * Puts back on an FL the Rx buffers associated with @si. The buffers
1815 * have already been unmapped and are left unmapped; we mark them so to
1816 * prevent further unmapping attempts.
1818 * This function undoes a series of @unmap_rx_buf calls when we find out
1819 * that the current packet can't be processed right away after all and we
1820 * need to come back to it later. This is a very rare event and there's
1821 * no effort to make this particularly efficient.
1823 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
1826 struct rx_sw_desc *d;
1830 q->cidx = q->size - 1;
1833 d = &q->sdesc[q->cidx];
1834 d->page = si->frags[frags].page;
1835 d->dma_addr |= RX_UNMAPPED_BUF;
1841 * is_new_response - check if a response is newly written
1842 * @r: the response descriptor
1843 * @q: the response queue
1845 * Returns true if a response descriptor contains a yet unprocessed
1848 static inline bool is_new_response(const struct rsp_ctrl *r,
1849 const struct sge_rspq *q)
1851 return RSPD_GEN(r->type_gen) == q->gen;
1855 * rspq_next - advance to the next entry in a response queue
1858 * Updates the state of a response queue to advance it to the next entry.
1860 static inline void rspq_next(struct sge_rspq *q)
1862 q->cur_desc = (void *)q->cur_desc + q->iqe_len;
1863 if (unlikely(++q->cidx == q->size)) {
1866 q->cur_desc = q->desc;
1871 * process_responses - process responses from an SGE response queue
1872 * @q: the ingress queue to process
1873 * @budget: how many responses can be processed in this round
1875 * Process responses from an SGE response queue up to the supplied budget.
1876 * Responses include received packets as well as control messages from FW
1879 * Additionally choose the interrupt holdoff time for the next interrupt
1880 * on this queue. If the system is under memory shortage use a fairly
1881 * long delay to help recovery.
1883 static int process_responses(struct sge_rspq *q, int budget)
1886 int budget_left = budget;
1887 const struct rsp_ctrl *rc;
1888 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1889 struct adapter *adapter = q->adap;
1890 struct sge *s = &adapter->sge;
1892 while (likely(budget_left)) {
1893 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
1894 if (!is_new_response(rc, q))
1898 rsp_type = RSPD_TYPE(rc->type_gen);
1899 if (likely(rsp_type == RSP_TYPE_FLBUF)) {
1900 struct page_frag *fp;
1902 const struct rx_sw_desc *rsd;
1903 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
1905 if (len & RSPD_NEWBUF) {
1906 if (likely(q->offset > 0)) {
1907 free_rx_bufs(q->adap, &rxq->fl, 1);
1910 len = RSPD_LEN(len);
1914 /* gather packet fragments */
1915 for (frags = 0, fp = si.frags; ; frags++, fp++) {
1916 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
1917 bufsz = get_buf_size(adapter, rsd);
1918 fp->page = rsd->page;
1919 fp->offset = q->offset;
1920 fp->size = min(bufsz, len);
1924 unmap_rx_buf(q->adap, &rxq->fl);
1928 * Last buffer remains mapped so explicitly make it
1929 * coherent for CPU access.
1931 dma_sync_single_for_cpu(q->adap->pdev_dev,
1933 fp->size, DMA_FROM_DEVICE);
1935 si.va = page_address(si.frags[0].page) +
1939 si.nfrags = frags + 1;
1940 ret = q->handler(q, q->cur_desc, &si);
1941 if (likely(ret == 0))
1942 q->offset += ALIGN(fp->size, s->fl_align);
1944 restore_rx_bufs(&si, &rxq->fl, frags);
1945 } else if (likely(rsp_type == RSP_TYPE_CPL)) {
1946 ret = q->handler(q, q->cur_desc, NULL);
1948 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
1951 if (unlikely(ret)) {
1952 /* couldn't process descriptor, back off for recovery */
1953 q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
1961 if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
1962 __refill_fl(q->adap, &rxq->fl);
1963 return budget - budget_left;
1967 * napi_rx_handler - the NAPI handler for Rx processing
1968 * @napi: the napi instance
1969 * @budget: how many packets we can process in this round
1971 * Handler for new data events when using NAPI. This does not need any
1972 * locking or protection from interrupts as data interrupts are off at
1973 * this point and other adapter interrupts do not interfere (the latter
1974 * is not a concern at all with MSI-X as non-data interrupts then have
1975 * a separate handler).
1977 static int napi_rx_handler(struct napi_struct *napi, int budget)
1979 unsigned int params;
1980 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
1981 int work_done = process_responses(q, budget);
1984 if (likely(work_done < budget)) {
1987 napi_complete(napi);
1988 timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params);
1990 if (q->adaptive_rx) {
1991 if (work_done > max(timer_pkt_quota[timer_index],
1993 timer_index = (timer_index + 1);
1995 timer_index = timer_index - 1;
1997 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
1998 q->next_intr_params = QINTR_TIMER_IDX(timer_index) |
2000 params = q->next_intr_params;
2002 params = q->next_intr_params;
2003 q->next_intr_params = q->intr_params;
2006 params = QINTR_TIMER_IDX(7);
2008 val = CIDXINC_V(work_done) | SEINTARM_V(params);
2010 /* If we don't have access to the new User GTS (T5+), use the old
2011 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2013 if (unlikely(q->bar2_addr == NULL)) {
2014 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
2015 val | INGRESSQID_V((u32)q->cntxt_id));
2017 writel(val | INGRESSQID_V(q->bar2_qid),
2018 q->bar2_addr + SGE_UDB_GTS);
2025 * The MSI-X interrupt handler for an SGE response queue.
2027 irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
2029 struct sge_rspq *q = cookie;
2031 napi_schedule(&q->napi);
2036 * Process the indirect interrupt entries in the interrupt queue and kick off
2037 * NAPI for each queue that has generated an entry.
2039 static unsigned int process_intrq(struct adapter *adap)
2041 unsigned int credits;
2042 const struct rsp_ctrl *rc;
2043 struct sge_rspq *q = &adap->sge.intrq;
2046 spin_lock(&adap->sge.intrq_lock);
2047 for (credits = 0; ; credits++) {
2048 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2049 if (!is_new_response(rc, q))
2053 if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
2054 unsigned int qid = ntohl(rc->pldbuflen_qid);
2056 qid -= adap->sge.ingr_start;
2057 napi_schedule(&adap->sge.ingr_map[qid]->napi);
2063 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
2065 /* If we don't have access to the new User GTS (T5+), use the old
2066 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2068 if (unlikely(q->bar2_addr == NULL)) {
2069 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
2070 val | INGRESSQID_V(q->cntxt_id));
2072 writel(val | INGRESSQID_V(q->bar2_qid),
2073 q->bar2_addr + SGE_UDB_GTS);
2076 spin_unlock(&adap->sge.intrq_lock);
2081 * The MSI interrupt handler, which handles data events from SGE response queues
2082 * as well as error and other async events as they all use the same MSI vector.
2084 static irqreturn_t t4_intr_msi(int irq, void *cookie)
2086 struct adapter *adap = cookie;
2088 t4_slow_intr_handler(adap);
2089 process_intrq(adap);
2094 * Interrupt handler for legacy INTx interrupts.
2095 * Handles data events from SGE response queues as well as error and other
2096 * async events as they all use the same interrupt line.
2098 static irqreturn_t t4_intr_intx(int irq, void *cookie)
2100 struct adapter *adap = cookie;
2102 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
2103 if (t4_slow_intr_handler(adap) | process_intrq(adap))
2105 return IRQ_NONE; /* probably shared interrupt */
2109 * t4_intr_handler - select the top-level interrupt handler
2110 * @adap: the adapter
2112 * Selects the top-level interrupt handler based on the type of interrupts
2113 * (MSI-X, MSI, or INTx).
2115 irq_handler_t t4_intr_handler(struct adapter *adap)
2117 if (adap->flags & USING_MSIX)
2118 return t4_sge_intr_msix;
2119 if (adap->flags & USING_MSI)
2121 return t4_intr_intx;
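/* Usage sketch (illustrative only; the flags/name arguments below are
 * placeholders): the non-MSI-X handler returned here is registered once for
 * the whole adapter, e.g.
 *
 *	err = request_irq(adap->pdev->irq, t4_intr_handler(adap), flags,
 *			  name, adap);
 *
 * whereas with MSI-X each response queue's vector is bound directly to
 * t4_sge_intr_msix() with the queue as the cookie.
 */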
2124 static void sge_rx_timer_cb(unsigned long data)
2127 unsigned int i, idma_same_state_cnt[2];
2128 struct adapter *adap = (struct adapter *)data;
2129 struct sge *s = &adap->sge;
2131 for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
2132 for (m = s->starving_fl[i]; m; m &= m - 1) {
2133 struct sge_eth_rxq *rxq;
2134 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2135 struct sge_fl *fl = s->egr_map[id];
2137 clear_bit(id, s->starving_fl);
2138 smp_mb__after_atomic();
2140 if (fl_starving(fl)) {
2141 rxq = container_of(fl, struct sge_eth_rxq, fl);
2142 if (napi_reschedule(&rxq->rspq.napi))
2145 set_bit(id, s->starving_fl);
2149 t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);
2150 idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);
2151 idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
2153 for (i = 0; i < 2; i++) {
2154 u32 debug0, debug11;
2156 /* If the Ingress DMA Same State Counter ("timer") is less
2157 * than 1s, then we can reset our synthesized Stall Timer and
2158 * continue. If we have previously emitted warnings about a
2159 * potential stalled Ingress Queue, issue a note indicating
2160 * that the Ingress Queue has resumed forward progress.
2162 if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
2163 if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
2164 CH_WARN(adap, "SGE idma%d, queue%u, resumed after %d sec\n",
2166 s->idma_stalled[i]/HZ);
2167 s->idma_stalled[i] = 0;
2171 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
2172 * domain. The first time we get here it'll be because we
2173 * passed the 1s Threshold; each additional time it'll be
2174 * because the RX Timer Callback is being fired on its regular schedule.
2177 * If the stall is below our Potential Hung Ingress Queue
2178 * Warning Threshold, continue.
2180 if (s->idma_stalled[i] == 0)
2181 s->idma_stalled[i] = HZ;
2183 s->idma_stalled[i] += RX_QCHECK_PERIOD;
2185 if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
2188 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
2189 if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
2192 /* Read and save the SGE IDMA State and Queue ID information.
2193 * We do this every time in case it changes across time ...
2195 t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0);
2196 debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
2197 s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
2199 t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11);
2200 debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
2201 s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
2203 CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state %u for %d secs (debug0=%#x, debug11=%#x)\n",
2204 i, s->idma_qid[i], s->idma_state[i],
2205 s->idma_stalled[i]/HZ, debug0, debug11);
2206 t4_sge_decode_idma_state(adap, s->idma_state[i]);
2209 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2212 static void sge_tx_timer_cb(unsigned long data)
2215 unsigned int i, budget;
2216 struct adapter *adap = (struct adapter *)data;
2217 struct sge *s = &adap->sge;
2219 for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
2220 for (m = s->txq_maperr[i]; m; m &= m - 1) {
2221 unsigned long id = __ffs(m) + i * BITS_PER_LONG;
2222 struct sge_ofld_txq *txq = s->egr_map[id];
2224 clear_bit(id, s->txq_maperr);
2225 tasklet_schedule(&txq->qresume_tsk);
2228 budget = MAX_TIMER_TX_RECLAIM;
2229 i = s->ethtxq_rover;
2231 struct sge_eth_txq *q = &s->ethtxq[i];
2233 if (q->q.in_use &&
2234 time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
2235 __netif_tx_trylock(q->txq)) {
2236 int avail = reclaimable(&q->q);
2242 free_tx_desc(adap, &q->q, avail, true);
2243 q->q.in_use -= avail;
2246 __netif_tx_unlock(q->txq);
2249 if (++i >= s->ethqsets)
2251 } while (budget && i != s->ethtxq_rover);
2252 s->ethtxq_rover = i;
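/* If the reclaim budget was exhausted there is probably more completed work
 * pending, so rearm the timer after just 2 jiffies instead of a full
 * TX_QCHECK_PERIOD.
 */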
2253 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2257 * bar2_address - return the BAR2 address for an SGE Queue's Registers
2258 * @adapter: the adapter
2259 * @qid: the SGE Queue ID
2260 * @qtype: the SGE Queue Type (Egress or Ingress)
2261 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
2263 * Returns the BAR2 address for the SGE Queue Registers associated with
2264 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
2265 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
2266 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
2267 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
2269 static void __iomem *bar2_address(struct adapter *adapter,
2271 enum t4_bar2_qtype qtype,
2272 unsigned int *pbar2_qid)
2277 ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype,
2278 &bar2_qoffset, pbar2_qid);
2282 return adapter->bar2 + bar2_qoffset;
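/* Illustrative sketch only (assumes the QID_V/SGE_UDB_KDOORBELL definitions
 * from the register headers): callers stash the returned address and BAR2
 * Queue ID and later ring the queue's doorbell with something like
 *
 *	if (q->bar2_addr)
 *		writel(val | QID_V(q->bar2_qid),
 *		       q->bar2_addr + SGE_UDB_KDOORBELL);
 */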
2285 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2286 struct net_device *dev, int intr_idx,
2287 struct sge_fl *fl, rspq_handler_t hnd)
2291 struct sge *s = &adap->sge;
2292 struct port_info *pi = netdev_priv(dev);
2294 /* Size needs to be a multiple of 16, including the status entry. */
2295 iq->size = roundup(iq->size, 16);
2297 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
2298 &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
2302 memset(&c, 0, sizeof(c));
2303 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
2304 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2305 FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0));
2306 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
2308 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
2309 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
2310 FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) |
2311 FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
2313 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
2314 FW_IQ_CMD_IQGTSMODE_F |
2315 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
2316 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
2317 c.iqsize = htons(iq->size);
2318 c.iqaddr = cpu_to_be64(iq->phys_addr);
2321 fl->size = roundup(fl->size, 8);
2322 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2323 sizeof(struct rx_sw_desc), &fl->addr,
2324 &fl->sdesc, s->stat_len, NUMA_NO_NODE);
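/* flsz below is the Free List ring size in 64-byte hardware units (8 pointers
 * of 8 bytes each), with the status page counted in the same units.
 */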
2328 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
2329 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN_F |
2330 FW_IQ_CMD_FL0FETCHRO_F |
2331 FW_IQ_CMD_FL0DATARO_F |
2332 FW_IQ_CMD_FL0PADEN_F);
2333 c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) |
2334 FW_IQ_CMD_FL0FBMAX_V(3));
2335 c.fl0size = htons(flsz);
2336 c.fl0addr = cpu_to_be64(fl->addr);
2339 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2343 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2344 iq->cur_desc = iq->desc;
2347 iq->next_intr_params = iq->intr_params;
2348 iq->cntxt_id = ntohs(c.iqid);
2349 iq->abs_id = ntohs(c.physiqid);
2350 iq->bar2_addr = bar2_address(adap,
2352 T4_BAR2_QTYPE_INGRESS,
2354 iq->size--; /* subtract status entry */
2358 /* set offset to -1 to distinguish ingress queues without FL */
2359 iq->offset = fl ? 0 : -1;
2361 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
2364 fl->cntxt_id = ntohs(c.fl0id);
2365 fl->avail = fl->pend_cred = 0;
2366 fl->pidx = fl->cidx = 0;
2367 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2368 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
2370 /* Note, we must initialize the BAR2 Free List User Doorbell
2371 * information before refilling the Free List!
2373 fl->bar2_addr = bar2_address(adap,
2375 T4_BAR2_QTYPE_EGRESS,
2377 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2385 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
2386 iq->desc, iq->phys_addr);
2389 if (fl && fl->desc) {
2392 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
2393 fl->desc, fl->addr);
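/* Illustrative sketch only (locals such as q, dev and msi_idx are assumptions
 * about the caller): an Ethernet Rx queue set is typically brought up from
 * the setup path with
 *
 *	err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
 *			       &q->fl, t4_ethrx_handler);
 */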
2399 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2402 q->bar2_addr = bar2_address(adap,
2404 T4_BAR2_QTYPE_EGRESS,
2407 q->cidx = q->pidx = 0;
2408 q->stops = q->restarts = 0;
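/* The hardware status page lives immediately past the last descriptor. */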
2409 q->stat = (void *)&q->desc[q->size];
2410 spin_lock_init(&q->db_lock);
2411 adap->sge.egr_map[id - adap->sge.egr_start] = q;
2414 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2415 struct net_device *dev, struct netdev_queue *netdevq,
2419 struct fw_eq_eth_cmd c;
2420 struct sge *s = &adap->sge;
2421 struct port_info *pi = netdev_priv(dev);
2423 /* Add status entries */
2424 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2426 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2427 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2428 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2429 netdev_queue_numa_node_read(netdevq));
2433 memset(&c, 0, sizeof(c));
2434 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
2435 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2436 FW_EQ_ETH_CMD_PFN_V(adap->fn) |
2437 FW_EQ_ETH_CMD_VFN_V(0));
2438 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
2439 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
2440 c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
2441 FW_EQ_ETH_CMD_VIID_V(pi->viid));
2442 c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) |
2443 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
2444 FW_EQ_ETH_CMD_FETCHRO_V(1) |
2445 FW_EQ_ETH_CMD_IQID_V(iqid));
2446 c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) |
2447 FW_EQ_ETH_CMD_FBMAX_V(3) |
2448 FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) |
2449 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2450 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2452 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2454 kfree(txq->q.sdesc);
2455 txq->q.sdesc = NULL;
2456 dma_free_coherent(adap->pdev_dev,
2457 nentries * sizeof(struct tx_desc),
2458 txq->q.desc, txq->q.phys_addr);
2463 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
2465 txq->tso = txq->tx_cso = txq->vlan_ins = 0;
2466 txq->mapping_err = 0;
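/* Illustrative sketch only (locals such as t, dev and j are assumptions about
 * the caller): the setup path allocates one Ethernet Tx queue per netdev Tx
 * queue, using the firmware event queue as the associated ingress queue, e.g.
 *
 *	err = t4_sge_alloc_eth_txq(adap, t, dev,
 *				   netdev_get_tx_queue(dev, j),
 *				   s->fw_evtq.cntxt_id);
 */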
2470 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2471 struct net_device *dev, unsigned int iqid,
2472 unsigned int cmplqid)
2475 struct fw_eq_ctrl_cmd c;
2476 struct sge *s = &adap->sge;
2477 struct port_info *pi = netdev_priv(dev);
2479 /* Add status entries */
2480 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2482 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2483 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
2484 NULL, 0, NUMA_NO_NODE);
2488 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
2489 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2490 FW_EQ_CTRL_CMD_PFN_V(adap->fn) |
2491 FW_EQ_CTRL_CMD_VFN_V(0));
2492 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
2493 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
2494 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
2495 c.physeqid_pkd = htonl(0);
2496 c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) |
2497 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
2498 FW_EQ_CTRL_CMD_FETCHRO_F |
2499 FW_EQ_CTRL_CMD_IQID_V(iqid));
2500 c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) |
2501 FW_EQ_CTRL_CMD_FBMAX_V(3) |
2502 FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) |
2503 FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
2504 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2506 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2508 dma_free_coherent(adap->pdev_dev,
2509 nentries * sizeof(struct tx_desc),
2510 txq->q.desc, txq->q.phys_addr);
2515 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
2517 skb_queue_head_init(&txq->sendq);
2518 tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
2523 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2524 struct net_device *dev, unsigned int iqid)
2527 struct fw_eq_ofld_cmd c;
2528 struct sge *s = &adap->sge;
2529 struct port_info *pi = netdev_priv(dev);
2531 /* Add status entries */
2532 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2534 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2535 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2536 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2541 memset(&c, 0, sizeof(c));
2542 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
2543 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2544 FW_EQ_OFLD_CMD_PFN_V(adap->fn) |
2545 FW_EQ_OFLD_CMD_VFN_V(0));
2546 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
2547 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
2548 c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) |
2549 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
2550 FW_EQ_OFLD_CMD_FETCHRO_F |
2551 FW_EQ_OFLD_CMD_IQID_V(iqid));
2552 c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) |
2553 FW_EQ_OFLD_CMD_FBMAX_V(3) |
2554 FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) |
2555 FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
2556 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2558 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2560 kfree(txq->q.sdesc);
2561 txq->q.sdesc = NULL;
2562 dma_free_coherent(adap->pdev_dev,
2563 nentries * sizeof(struct tx_desc),
2564 txq->q.desc, txq->q.phys_addr);
2569 init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
2571 skb_queue_head_init(&txq->sendq);
2572 tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
2574 txq->mapping_err = 0;
2578 static void free_txq(struct adapter *adap, struct sge_txq *q)
2580 struct sge *s = &adap->sge;
2582 dma_free_coherent(adap->pdev_dev,
2583 q->size * sizeof(struct tx_desc) + s->stat_len,
2584 q->desc, q->phys_addr);
2590 static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2593 struct sge *s = &adap->sge;
2594 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2596 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
2597 t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
2598 rq->cntxt_id, fl_id, 0xffff);
2599 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
2600 rq->desc, rq->phys_addr);
2601 netif_napi_del(&rq->napi);
2603 rq->cntxt_id = rq->abs_id = 0;
2607 free_rx_bufs(adap, fl, fl->avail);
2608 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
2609 fl->desc, fl->addr);
2618 * t4_free_ofld_rxqs - free a block of consecutive Rx queues
2619 * @adap: the adapter
2620 * @n: number of queues
2621 * @q: pointer to first queue
2623 * Release the resources of a consecutive block of offload Rx queues.
2625 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
2627 for ( ; n; n--, q++)
2629 free_rspq_fl(adap, &q->rspq,
2630 q->fl.size ? &q->fl : NULL);
2634 * t4_free_sge_resources - free SGE resources
2635 * @adap: the adapter
2637 * Frees resources used by the SGE queue sets.
2639 void t4_free_sge_resources(struct adapter *adap)
2642 struct sge_eth_rxq *eq = adap->sge.ethrxq;
2643 struct sge_eth_txq *etq = adap->sge.ethtxq;
2645 /* clean up Ethernet Tx/Rx queues */
2646 for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
2648 free_rspq_fl(adap, &eq->rspq,
2649 eq->fl.size ? &eq->fl : NULL);
2651 t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
2653 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
2654 kfree(etq->q.sdesc);
2655 free_txq(adap, &etq->q);
2659 /* clean up RDMA and iSCSI Rx queues */
2660 t4_free_ofld_rxqs(adap, adap->sge.ofldqsets, adap->sge.ofldrxq);
2661 t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
2662 t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
2664 /* clean up offload Tx queues */
2665 for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
2666 struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
2669 tasklet_kill(&q->qresume_tsk);
2670 t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
2672 free_tx_desc(adap, &q->q, q->q.in_use, false);
2674 __skb_queue_purge(&q->sendq);
2675 free_txq(adap, &q->q);
2679 /* clean up control Tx queues */
2680 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
2681 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
2684 tasklet_kill(&cq->qresume_tsk);
2685 t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
2687 __skb_queue_purge(&cq->sendq);
2688 free_txq(adap, &cq->q);
2692 if (adap->sge.fw_evtq.desc)
2693 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
2695 if (adap->sge.intrq.desc)
2696 free_rspq_fl(adap, &adap->sge.intrq, NULL);
2698 /* clear the reverse egress queue map */
2699 memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
2702 void t4_sge_start(struct adapter *adap)
2704 adap->sge.ethtxq_rover = 0;
2705 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2706 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2710 * t4_sge_stop - disable SGE operation
2711 * @adap: the adapter
2713 * Stop tasklets and timers associated with the DMA engine. Note that
2714 * this is effective only if measures have been taken to disable any HW
2715 * events that may restart them.
2717 void t4_sge_stop(struct adapter *adap)
2720 struct sge *s = &adap->sge;
2722 if (in_interrupt()) /* actions below require waiting */
2725 if (s->rx_timer.function)
2726 del_timer_sync(&s->rx_timer);
2727 if (s->tx_timer.function)
2728 del_timer_sync(&s->tx_timer);
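/* The .function checks above guard against tearing down timers that were
 * never set up, e.g. when this is reached on an error path before
 * t4_sge_init() has initialized them.
 */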
2730 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
2731 struct sge_ofld_txq *q = &s->ofldtxq[i];
2734 tasklet_kill(&q->qresume_tsk);
2736 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
2737 struct sge_ctrl_txq *cq = &s->ctrlq[i];
2740 tasklet_kill(&cq->qresume_tsk);
2745 * t4_sge_init_soft - grab core SGE values needed by SGE code
2746 * @adap: the adapter
2748 * Read the SGE operating parameters that we need to do our job and
2749 * verify that we can live with them.
2752 static int t4_sge_init_soft(struct adapter *adap)
2754 struct sge *s = &adap->sge;
2755 u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
2756 u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
2757 u32 ingress_rx_threshold;
2760 * Verify that CPL messages are going to the Ingress Queue for
2761 * process_responses() and that only packet data is going to the Free Lists.
2764 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
2765 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
2766 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
2771 * Validate the Host Buffer Register Array indices that we want to use.
2774 * XXX Note that we should really read through the Host Buffer Size
2775 * XXX register array and find the indices of the Buffer Sizes which
2776 * XXX meet our needs!
2778 #define READ_FL_BUF(x) \
2779 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
2781 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
2782 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
2783 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
2784 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
2786 /* We only bother using the Large Page logic if the Large Page Buffer
2787 * is larger than our Page Size Buffer.
2789 if (fl_large_pg <= fl_small_pg)
2794 /* The Page Size Buffer must be exactly equal to our Page Size and the
2795 * Large Page Size Buffer should be 0 (per above) or a power of 2.
2797 if (fl_small_pg != PAGE_SIZE ||
2798 (fl_large_pg & (fl_large_pg-1)) != 0) {
2799 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
2800 fl_small_pg, fl_large_pg);
2804 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
2806 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
2807 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
2808 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
2809 fl_small_mtu, fl_large_mtu);
2814 * Retrieve our RX interrupt holdoff timer values and counter
2815 * threshold values from the SGE parameters.
2817 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
2818 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
2819 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
2820 s->timer_val[0] = core_ticks_to_us(adap,
2821 TIMERVALUE0_G(timer_value_0_and_1));
2822 s->timer_val[1] = core_ticks_to_us(adap,
2823 TIMERVALUE1_G(timer_value_0_and_1));
2824 s->timer_val[2] = core_ticks_to_us(adap,
2825 TIMERVALUE2_G(timer_value_2_and_3));
2826 s->timer_val[3] = core_ticks_to_us(adap,
2827 TIMERVALUE3_G(timer_value_2_and_3));
2828 s->timer_val[4] = core_ticks_to_us(adap,
2829 TIMERVALUE4_G(timer_value_4_and_5));
2830 s->timer_val[5] = core_ticks_to_us(adap,
2831 TIMERVALUE5_G(timer_value_4_and_5));
2833 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
2834 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
2835 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
2836 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
2837 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
2843 * t4_sge_init - initialize SGE
2844 * @adap: the adapter
2846 * Perform low-level SGE code initialization needed every time after a chip reset.
2849 int t4_sge_init(struct adapter *adap)
2851 struct sge *s = &adap->sge;
2852 u32 sge_control, sge_control2, sge_conm_ctrl;
2853 unsigned int ingpadboundary, ingpackboundary;
2854 int ret, egress_threshold;
2857 * Ingress Padding Boundary and Egress Status Page Size are set up by
2858 * t4_fixup_host_params().
2860 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
2861 s->pktshift = PKTSHIFT_G(sge_control);
2862 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
2864 /* T4 uses a single control field to specify both the PCIe Padding and
2865 * Packing Boundary. T5 introduced the ability to specify these
2866 * separately. The actual Ingress Packet Data alignment boundary
2867 * within Packed Buffer Mode is the maximum of these two specified values.
2870 ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
2871 INGPADBOUNDARY_SHIFT_X);
2872 if (is_t4(adap->params.chip)) {
2873 s->fl_align = ingpadboundary;
2875 /* T5 has a different interpretation of one of the PCIe Packing Boundary values.
2878 sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
2879 ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
2880 if (ingpackboundary == INGPACKBOUNDARY_16B_X)
2881 ingpackboundary = 16;
2883 ingpackboundary = 1 << (ingpackboundary +
2884 INGPACKBOUNDARY_SHIFT_X);
2886 s->fl_align = max(ingpadboundary, ingpackboundary);
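/* Worked example (assuming the usual INGPADBOUNDARY_SHIFT_X and
 * INGPACKBOUNDARY_SHIFT_X value of 5): a padding-boundary field of 0 yields a
 * 32-byte pad boundary and a packing-boundary field of 1 yields a 64-byte
 * pack boundary, so fl_align ends up as 64 on such a T5 configuration.
 */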
2889 ret = t4_sge_init_soft(adap);
2894 * A FL with <= fl_starve_thres buffers is starving and a periodic
2895 * timer will attempt to refill it. This needs to be larger than the
2896 * SGE's Egress Congestion Threshold. If it isn't, then we can get
2897 * stuck waiting for new packets while the SGE is waiting for us to
2898 * give it more Free List entries. (Note that the SGE's Egress
2899 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
2900 * there was only a single field to control this. For T5 there's the
2901 * original field which now only applies to Unpacked Mode Free List
2902 * buffers and a new field which only applies to Packed Mode Free List buffers.
2905 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
2906 if (is_t4(adap->params.chip))
2907 egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
2909 egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
2910 s->fl_starve_thres = 2*egress_threshold + 1;
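/* The register threshold is in units of 2 Free List pointers (see the comment
 * above), so doubling it and adding one makes fl_starve_thres strictly
 * greater than the Egress Congestion Threshold expressed in single pointers.
 */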
2912 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
2913 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
2914 s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000; /* 1 s */
2915 s->idma_stalled[0] = 0;
2916 s->idma_stalled[1] = 0;
2917 spin_lock_init(&s->intrq_lock);