2 * This file is part of the Chelsio T3 Ethernet driver.
4 * Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
6 * This program is distributed in the hope that it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
9 * release for licensing terms and conditions.
12 #include <linux/skbuff.h>
13 #include <linux/netdevice.h>
14 #include <linux/etherdevice.h>
15 #include <linux/if_vlan.h>
17 #include <linux/tcp.h>
18 #include <linux/dma-mapping.h>
23 #include "firmware_exports.h"
27 #define SGE_RX_SM_BUF_SIZE 1536
28 #define SGE_RX_COPY_THRES 256
30 # define SGE_RX_DROP_THRES 16
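/*
 * Rx packets of SGE_RX_COPY_THRES bytes or less are copied into a freshly
 * allocated skb and their free-list buffer is recycled (see get_packet()).
 * SGE_RX_DROP_THRES is the number of remaining free-list buffers below which
 * packets too large to copy may be dropped so their buffers can be recycled.
 */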
33 * Period of the Tx buffer reclaim timer. This timer does not need to run
34 * frequently as Tx buffers are usually reclaimed by new Tx packets.
36 #define TX_RECLAIM_PERIOD (HZ / 4)
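/* HZ / 4 jiffies, i.e., the reclaim timer fires roughly every 250 ms. */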
38 /* WR size in bytes */
39 #define WR_LEN (WR_FLITS * 8)
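/* A flit is an 8-byte (64-bit) unit, so this converts WR_FLITS to bytes. */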
42 * Types of Tx queues in each queue set. Order here matters, do not change.
44 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
46 /* Values for sge_txq.flags */
48 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
49 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
53 u64 flit[TX_DESC_FLITS];
63 struct tx_sw_desc { /* SW state per Tx descriptor */
67 struct rx_sw_desc { /* SW state per Rx descriptor */
69 DECLARE_PCI_UNMAP_ADDR(dma_addr);
72 struct rsp_desc { /* response queue descriptor */
73 struct rss_header rss_hdr;
80 struct unmap_info { /* packet unmapping info, overlays skb->cb */
81 int sflit; /* start flit of first SGL entry in Tx descriptor */
82 u16 fragidx; /* first page fragment in current Tx descriptor */
83 u16 addr_idx; /* buffer index of first SGL entry in descriptor */
84 u32 len; /* mapped length of skb main body */
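/*
 * unmap_info overlays skb->cb and must fit within it.  The send path fills
 * it in when the packet is mapped and unmap_skb() consumes it at reclaim time.
 */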
88 * Maps a number of flits to the number of Tx descriptors that can hold them.
91 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
93 * HW allows up to 4 descriptors to be combined into a WR.
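 * For example, if WR_FLITS were 16, a WR of 17 flits would need
 * 1 + (17 - 2) / (16 - 1) = 2 descriptors.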
95 static u8 flit_desc_map[] = {
97 #if SGE_NUM_GENBITS == 1
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
101 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
102 #elif SGE_NUM_GENBITS == 2
103 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
106 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
108 # error "SGE_NUM_GENBITS must be 1 or 2"
112 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
114 return container_of(q, struct sge_qset, fl[qidx]);
117 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
119 return container_of(q, struct sge_qset, rspq);
122 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
124 return container_of(q, struct sge_qset, txq[qidx]);
128 * refill_rspq - replenish an SGE response queue
129 * @adapter: the adapter
130 * @q: the response queue to replenish
131 * @credits: how many new responses to make available
133 * Replenishes a response queue by making the supplied number of responses available to HW.
136 static inline void refill_rspq(struct adapter *adapter,
137 const struct sge_rspq *q, unsigned int credits)
139 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
140 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
144 * need_skb_unmap - does the platform need unmapping of sk_buffs?
146 * Returns true if the platform needs sk_buff unmapping. The compiler
147 * optimizes away the unmapping code when this returns a compile-time false.
149 static inline int need_skb_unmap(void)
152 * This structure is used to tell if the platform needs buffer
153 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
156 DECLARE_PCI_UNMAP_ADDR(addr);
159 return sizeof(struct dummy) != 0;
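/*
 * Under GCC an empty struct has size 0, so when DECLARE_PCI_UNMAP_ADDR
 * expands to nothing this evaluates to a compile-time constant 0 and the
 * unmapping code guarded by need_skb_unmap() is optimized away.
 */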
163 * unmap_skb - unmap a packet main body and its page fragments
165 * @q: the Tx queue containing Tx descriptors for the packet
166 * @cidx: index of Tx descriptor
167 * @pdev: the PCI device
169 * Unmap the main body of an sk_buff and its page fragments, if any.
170 * Because of the fairly complicated structure of our SGLs and the desire
171 * to conserve space for metadata, we keep the information necessary to
172 * unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
173 * in the Tx descriptors (the physical addresses of the various data
174 * buffers). The send functions initialize the state in skb->cb so we
175 * can unmap the buffers held in the first Tx descriptor here, and we
176 * have enough information at this point to update the state for the next descriptor.
179 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
180 unsigned int cidx, struct pci_dev *pdev)
182 const struct sg_ent *sgp;
183 struct unmap_info *ui = (struct unmap_info *)skb->cb;
184 int nfrags, frag_idx, curflit, j = ui->addr_idx;
186 sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];
189 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
191 ui->len = 0; /* so we know for next descriptor for this skb */
195 frag_idx = ui->fragidx;
196 curflit = ui->sflit + 1 + j;
197 nfrags = skb_shinfo(skb)->nr_frags;
199 while (frag_idx < nfrags && curflit < WR_FLITS) {
200 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
201 skb_shinfo(skb)->frags[frag_idx].size,
212 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
213 ui->fragidx = frag_idx;
215 ui->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
220 * free_tx_desc - reclaims Tx descriptors and their buffers
221 * @adapter: the adapter
222 * @q: the Tx queue to reclaim descriptors from
223 * @n: the number of descriptors to reclaim
225 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
226 * Tx buffers. Called with the Tx queue lock held.
228 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
231 struct tx_sw_desc *d;
232 struct pci_dev *pdev = adapter->pdev;
233 unsigned int cidx = q->cidx;
237 if (d->skb) { /* an SGL is present */
238 if (need_skb_unmap())
239 unmap_skb(d->skb, q, cidx, pdev);
240 if (d->skb->priority == cidx)
244 if (++cidx == q->size) {
253 * reclaim_completed_tx - reclaims completed Tx descriptors
254 * @adapter: the adapter
255 * @q: the Tx queue to reclaim completed descriptors from
257 * Reclaims Tx descriptors that the SGE has indicated it has processed,
258 * and frees the associated buffers if possible. Called with the Tx queue lock held.
261 static inline void reclaim_completed_tx(struct adapter *adapter,
264 unsigned int reclaim = q->processed - q->cleaned;
267 free_tx_desc(adapter, q, reclaim);
268 q->cleaned += reclaim;
269 q->in_use -= reclaim;
274 * should_restart_tx - are there enough resources to restart a Tx queue?
277 * Checks if there are enough descriptors to restart a suspended Tx queue.
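 * The queue may be restarted once fewer than half of its descriptors are
 * still outstanding, counting descriptors the SGE has processed but that
 * have not yet been reclaimed as available.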
279 static inline int should_restart_tx(const struct sge_txq *q)
281 unsigned int r = q->processed - q->cleaned;
283 return q->in_use - r < (q->size >> 1);
287 * free_rx_bufs - free the Rx buffers on an SGE free list
288 * @pdev: the PCI device associated with the adapter
289 * @rxq: the SGE free list to clean up
291 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
292 * this queue should be stopped before calling this function.
294 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
296 unsigned int cidx = q->cidx;
298 while (q->credits--) {
299 struct rx_sw_desc *d = &q->sdesc[cidx];
301 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
302 q->buf_size, PCI_DMA_FROMDEVICE);
305 if (++cidx == q->size)
311 * add_one_rx_buf - add a packet buffer to a free-buffer list
312 * @skb: the buffer to add
313 * @len: the buffer length
314 * @d: the HW Rx descriptor to write
315 * @sd: the SW Rx descriptor to write
316 * @gen: the generation bit value
317 * @pdev: the PCI device associated with the adapter
319 * Add a buffer of the given length to the supplied HW and SW Rx descriptors.
322 static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
323 struct rx_desc *d, struct rx_sw_desc *sd,
324 unsigned int gen, struct pci_dev *pdev)
329 mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
330 pci_unmap_addr_set(sd, dma_addr, mapping);
332 d->addr_lo = cpu_to_be32(mapping);
333 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
335 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
336 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
340 * refill_fl - refill an SGE free-buffer list
341 * @adapter: the adapter
342 * @q: the free-list to refill
343 * @n: the number of new buffers to allocate
344 * @gfp: the gfp flags for allocating new buffers
346 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
347 * allocated with the supplied gfp flags. The caller must ensure that
348 * @n does not exceed the queue's capacity.
350 static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
352 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
353 struct rx_desc *d = &q->desc[q->pidx];
356 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
361 add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
364 if (++q->pidx == q->size) {
373 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
376 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
378 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
382 * recycle_rx_buf - recycle a receive buffer
383 * @adapter: the adapter
384 * @q: the SGE free list
385 * @idx: index of buffer to recycle
387 * Recycles the specified buffer on the given free list by adding it at
388 * the next available slot on the list.
390 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
393 struct rx_desc *from = &q->desc[idx];
394 struct rx_desc *to = &q->desc[q->pidx];
396 q->sdesc[q->pidx] = q->sdesc[idx];
397 to->addr_lo = from->addr_lo; /* already big endian */
398 to->addr_hi = from->addr_hi; /* likewise */
400 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
401 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
404 if (++q->pidx == q->size) {
408 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
412 * alloc_ring - allocate resources for an SGE descriptor ring
413 * @pdev: the PCI device
414 * @nelem: the number of descriptors
415 * @elem_size: the size of each descriptor
416 * @sw_size: the size of the SW state associated with each ring element
417 * @phys: the physical address of the allocated ring
418 * @metadata: address of the array holding the SW state for the ring
420 * Allocates resources for an SGE descriptor ring, such as Tx queues,
421 * free buffer lists, or response queues. Each SGE ring requires
422 * space for its HW descriptors plus, optionally, space for the SW state
423 * associated with each HW entry (the metadata). The function returns
424 * three values: the virtual address for the HW ring (the return value
425 * of the function), the physical address of the HW ring, and the address of the SW ring.
428 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
429 size_t sw_size, dma_addr_t *phys, void *metadata)
431 size_t len = nelem * elem_size;
433 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
438 s = kcalloc(nelem, sw_size, GFP_KERNEL);
441 dma_free_coherent(&pdev->dev, len, p, *phys);
446 *(void **)metadata = s;
452 * free_qset - free the resources of an SGE queue set
453 * @adapter: the adapter owning the queue set
456 * Release the HW and SW resources associated with an SGE queue set, such
457 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
458 * queue set must be quiesced prior to calling this.
460 void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
463 struct pci_dev *pdev = adapter->pdev;
465 if (q->tx_reclaim_timer.function)
466 del_timer_sync(&q->tx_reclaim_timer);
468 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
470 spin_lock(&adapter->sge.reg_lock);
471 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
472 spin_unlock(&adapter->sge.reg_lock);
473 free_rx_bufs(pdev, &q->fl[i]);
474 kfree(q->fl[i].sdesc);
475 dma_free_coherent(&pdev->dev,
477 sizeof(struct rx_desc), q->fl[i].desc,
481 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
482 if (q->txq[i].desc) {
483 spin_lock(&adapter->sge.reg_lock);
484 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
485 spin_unlock(&adapter->sge.reg_lock);
486 if (q->txq[i].sdesc) {
487 free_tx_desc(adapter, &q->txq[i],
489 kfree(q->txq[i].sdesc);
491 dma_free_coherent(&pdev->dev,
493 sizeof(struct tx_desc),
494 q->txq[i].desc, q->txq[i].phys_addr);
495 __skb_queue_purge(&q->txq[i].sendq);
499 spin_lock(&adapter->sge.reg_lock);
500 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
501 spin_unlock(&adapter->sge.reg_lock);
502 dma_free_coherent(&pdev->dev,
503 q->rspq.size * sizeof(struct rsp_desc),
504 q->rspq.desc, q->rspq.phys_addr);
508 q->netdev->atalk_ptr = NULL;
510 memset(q, 0, sizeof(*q));
514 * init_qset_cntxt - initialize an SGE queue set context info
516 * @id: the queue set id
518 * Initializes the TIDs and context ids for the queues of a queue set.
520 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
522 qs->rspq.cntxt_id = id;
523 qs->fl[0].cntxt_id = 2 * id;
524 qs->fl[1].cntxt_id = 2 * id + 1;
525 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
526 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
527 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
528 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
529 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
533 * sgl_len - calculates the size of an SGL of the given capacity
534 * @n: the number of SGL entries
536 * Calculates the number of flits needed for a scatter/gather list that
537 * can hold the given number of entries.
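 * For example, 3 entries need (3 * 3) / 2 + 1 = 5 flits: each full pair of
 * entries packs into 3 flits and a trailing odd entry takes 2.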
539 static inline unsigned int sgl_len(unsigned int n)
541 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
542 return (3 * n) / 2 + (n & 1);
546 * flits_to_desc - returns the num of Tx descriptors for the given flits
547 * @n: the number of flits
549 * Calculates the number of Tx descriptors needed for the supplied number of flits.
552 static inline unsigned int flits_to_desc(unsigned int n)
554 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
555 return flit_desc_map[n];
559 * get_packet - return the next ingress packet buffer from a free list
560 * @adap: the adapter that received the packet
561 * @fl: the SGE free list holding the packet
562 * @len: the packet length including any SGE padding
563 * @drop_thres: # of remaining buffers before we start dropping packets
565 * Get the next packet from a free list and complete setup of the
566 * sk_buff. If the packet is small we make a copy and recycle the
567 * original buffer, otherwise we use the original buffer itself. If a
568 * positive drop threshold is supplied packets are dropped and their
569 * buffers recycled if (a) the number of remaining buffers is under the
570 * threshold and the packet is too big to copy, or (b) the packet should
571 * be copied but there is no memory for the copy.
573 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
574 unsigned int len, unsigned int drop_thres)
576 struct sk_buff *skb = NULL;
577 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
579 prefetch(sd->skb->data);
581 if (len <= SGE_RX_COPY_THRES) {
582 skb = alloc_skb(len, GFP_ATOMIC);
583 if (likely(skb != NULL)) {
585 pci_dma_sync_single_for_cpu(adap->pdev,
588 len, PCI_DMA_FROMDEVICE);
589 memcpy(skb->data, sd->skb->data, len);
590 pci_dma_sync_single_for_device(adap->pdev,
593 len, PCI_DMA_FROMDEVICE);
594 } else if (!drop_thres)
597 recycle_rx_buf(adap, fl, fl->cidx);
601 if (unlikely(fl->credits < drop_thres))
605 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
606 fl->buf_size, PCI_DMA_FROMDEVICE);
609 __refill_fl(adap, fl);
614 * get_imm_packet - return the next ingress packet buffer from a response
615 * @resp: the response descriptor containing the packet data
617 * Return a packet containing the immediate data of the given response.
619 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
621 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
624 __skb_put(skb, IMMED_PKT_SIZE);
625 memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
631 * calc_tx_descs - calculate the number of Tx descriptors for a packet
634 * Returns the number of Tx descriptors needed for the given Ethernet
635 * packet. Ethernet packets require addition of WR and CPL headers.
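 * For example, a non-TSO packet that is too big to send as immediate data
 * and has two page fragments needs sgl_len(2 + 1) + 2 = 7 flits.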
637 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
641 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
644 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
645 if (skb_shinfo(skb)->gso_size)
647 return flits_to_desc(flits);
651 * make_sgl - populate a scatter/gather list for a packet
653 * @sgp: the SGL to populate
654 * @start: start address of skb main body data to include in the SGL
655 * @len: length of skb main body data to include in the SGL
656 * @pdev: the PCI device
658 * Generates a scatter/gather list for the buffers that make up a packet
659 * and returns the SGL size in 8-byte words. The caller must size the SGL to hold all of the packet's buffers.
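 * The value returned equals sgl_len() of the number of SGL entries written:
 * one for the main body, if @len is non-zero, plus one per page fragment.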
662 static inline unsigned int make_sgl(const struct sk_buff *skb,
663 struct sg_ent *sgp, unsigned char *start,
664 unsigned int len, struct pci_dev *pdev)
667 unsigned int i, j = 0, nfrags;
670 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
671 sgp->len[0] = cpu_to_be32(len);
672 sgp->addr[0] = cpu_to_be64(mapping);
676 nfrags = skb_shinfo(skb)->nr_frags;
677 for (i = 0; i < nfrags; i++) {
678 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
680 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
681 frag->size, PCI_DMA_TODEVICE);
682 sgp->len[j] = cpu_to_be32(frag->size);
683 sgp->addr[j] = cpu_to_be64(mapping);
690 return ((nfrags + (len != 0)) * 3) / 2 + j;
694 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
698 * Ring the doorbell if a Tx queue is asleep. There is a natural race,
699 * where the HW may go to sleep just after we check; in that case
700 * the interrupt handler will detect the outstanding Tx packet
701 * and ring the doorbell for us.
703 * When GTS is disabled we unconditionally ring the doorbell.
705 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
708 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
709 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
710 set_bit(TXQ_LAST_PKT_DB, &q->flags);
711 t3_write_reg(adap, A_SG_KDOORBELL,
712 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
715 wmb(); /* write descriptors before telling HW */
716 t3_write_reg(adap, A_SG_KDOORBELL,
717 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
721 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
723 #if SGE_NUM_GENBITS == 2
724 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
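/*
 * With two generation bits the generation value is also written into the
 * descriptor's last flit, marking when the whole descriptor has been written.
 */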
729 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
730 * @ndesc: number of Tx descriptors spanned by the SGL
731 * @skb: the packet corresponding to the WR
732 * @d: first Tx descriptor to be written
733 * @pidx: index of above descriptors
734 * @q: the SGE Tx queue
736 * @flits: number of flits to the start of the SGL in the first descriptor
737 * @sgl_flits: the SGL size in flits
738 * @gen: the Tx descriptor generation
739 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
740 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
742 * Write a work request header and an associated SGL. If the SGL is
743 * small enough to fit into one Tx descriptor it has already been written
744 * and we just need to write the WR header. Otherwise we distribute the
745 * SGL across the number of descriptors it spans.
747 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
748 struct tx_desc *d, unsigned int pidx,
749 const struct sge_txq *q,
750 const struct sg_ent *sgl,
751 unsigned int flits, unsigned int sgl_flits,
752 unsigned int gen, unsigned int wr_hi,
755 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
756 struct tx_sw_desc *sd = &q->sdesc[pidx];
759 if (need_skb_unmap()) {
760 struct unmap_info *ui = (struct unmap_info *)skb->cb;
767 if (likely(ndesc == 1)) {
768 skb->priority = pidx;
769 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
770 V_WR_SGLSFLT(flits)) | wr_hi;
772 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
773 V_WR_GEN(gen)) | wr_lo;
776 unsigned int ogen = gen;
777 const u64 *fp = (const u64 *)sgl;
778 struct work_request_hdr *wp = wrp;
780 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
781 V_WR_SGLSFLT(flits)) | wr_hi;
784 unsigned int avail = WR_FLITS - flits;
786 if (avail > sgl_flits)
788 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
797 if (++pidx == q->size) {
805 wrp = (struct work_request_hdr *)d;
806 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
807 V_WR_SGLSFLT(1)) | wr_hi;
808 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
810 V_WR_GEN(gen)) | wr_lo;
814 skb->priority = pidx;
815 wrp->wr_hi |= htonl(F_WR_EOP);
817 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
818 wr_gen2((struct tx_desc *)wp, ogen);
824 * write_tx_pkt_wr - write a TX_PKT work request
826 * @skb: the packet to send
827 * @pi: the egress interface
828 * @pidx: index of the first Tx descriptor to write
829 * @gen: the generation value to use
831 * @ndesc: number of descriptors the packet will occupy
832 * @compl: the value of the COMPL bit to use
834 * Generate a TX_PKT work request to send the supplied packet.
836 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
837 const struct port_info *pi,
838 unsigned int pidx, unsigned int gen,
839 struct sge_txq *q, unsigned int ndesc,
842 unsigned int flits, sgl_flits, cntrl, tso_info;
843 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
844 struct tx_desc *d = &q->desc[pidx];
845 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
847 cpl->len = htonl(skb->len | 0x80000000);
848 cntrl = V_TXPKT_INTF(pi->port_id);
850 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
851 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
853 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
856 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
859 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
860 hdr->cntrl = htonl(cntrl);
861 eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
862 CPL_ETH_II : CPL_ETH_II_VLAN;
863 tso_info |= V_LSO_ETH_TYPE(eth_type) |
864 V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) |
865 V_LSO_TCPHDR_WORDS(skb->h.th->doff);
866 hdr->lso_info = htonl(tso_info);
869 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
870 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
871 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
872 cpl->cntrl = htonl(cntrl);
874 if (skb->len <= WR_LEN - sizeof(*cpl)) {
875 q->sdesc[pidx].skb = NULL;
877 memcpy(&d->flit[2], skb->data, skb->len);
879 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
881 flits = (skb->len + 7) / 8 + 2;
882 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
883 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
884 | F_WR_SOP | F_WR_EOP | compl);
886 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
896 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
897 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
898 if (need_skb_unmap())
899 ((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
901 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
902 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
903 htonl(V_WR_TID(q->token)));
907 * eth_xmit - add a packet to the Ethernet Tx queue
909 * @dev: the egress net device
911 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
913 int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
915 unsigned int ndesc, pidx, credits, gen, compl;
916 const struct port_info *pi = netdev_priv(dev);
917 struct adapter *adap = dev->priv;
918 struct sge_qset *qs = dev2qset(dev);
919 struct sge_txq *q = &qs->txq[TXQ_ETH];
922 * The chip's minimum packet length is 9 octets, but play it safe and reject
923 * anything shorter than an Ethernet header.
925 if (unlikely(skb->len < ETH_HLEN)) {
931 reclaim_completed_tx(adap, q);
933 credits = q->size - q->in_use;
934 ndesc = calc_tx_descs(skb);
936 if (unlikely(credits < ndesc)) {
937 if (!netif_queue_stopped(dev)) {
938 netif_stop_queue(dev);
939 set_bit(TXQ_ETH, &qs->txq_stopped);
941 dev_err(&adap->pdev->dev,
942 "%s: Tx ring %u full while queue awake!\n",
943 dev->name, q->cntxt_id & 7);
945 spin_unlock(&q->lock);
946 return NETDEV_TX_BUSY;
950 if (unlikely(credits - ndesc < q->stop_thres)) {
952 netif_stop_queue(dev);
953 set_bit(TXQ_ETH, &qs->txq_stopped);
955 if (should_restart_tx(q) &&
956 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
958 netif_wake_queue(dev);
965 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
969 if (q->pidx >= q->size) {
974 /* update port statistics */
975 if (skb->ip_summed == CHECKSUM_COMPLETE)
976 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
977 if (skb_shinfo(skb)->gso_size)
978 qs->port_stats[SGE_PSTAT_TSO]++;
979 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
980 qs->port_stats[SGE_PSTAT_VLANINS]++;
982 dev->trans_start = jiffies;
983 spin_unlock(&q->lock);
986 * We do not use Tx completion interrupts to free DMAd Tx packets.
987 * This is good for performance but means that we rely on new Tx
988 * packets arriving to run the destructors of completed packets,
989 * which open up space in their sockets' send queues. Sometimes
990 * we do not get such new packets causing Tx to stall. A single
991 * UDP transmitter is a good example of this situation. We have
992 * a clean up timer that periodically reclaims completed packets
993 * but it doesn't run often enough (nor do we want it to) to prevent
994 * lengthy stalls. A solution to this problem is to run the
995 * destructor early, after the packet is queued but before it's DMAd.
996 * A downside is that we lie to socket memory accounting, but the amount
997 * of extra memory is reasonable (limited by the number of Tx
998 * descriptors), the packets almost always do get freed quickly by new
999 * packets, and for protocols like TCP that wait for ACKs to really
1000 * free up the data, the extra memory matters even less.
1001 * On the positive side we run the destructors on the sending CPU
1002 * rather than on a potentially different completing CPU, usually a
1003 * good thing. We also run them without holding our Tx queue lock,
1004 * unlike what reclaim_completed_tx() would otherwise do.
1006 * Run the destructor before telling the DMA engine about the packet
1007 * to make sure it doesn't complete and get freed prematurely.
1009 if (likely(!skb_shared(skb)))
1012 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1013 check_ring_tx_db(adap, q);
1014 return NETDEV_TX_OK;
1018 * write_imm - write a packet into a Tx descriptor as immediate data
1019 * @d: the Tx descriptor to write
1021 * @len: the length of packet data to write as immediate data
1022 * @gen: the generation bit value to write
1024 * Writes a packet as immediate data into a Tx descriptor. The packet
1025 * contains a work request at its beginning. We must write the packet
1026 * carefully so the SGE doesn't accidentally read it before it's written in its entirety.
1029 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1030 unsigned int len, unsigned int gen)
1032 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1033 struct work_request_hdr *to = (struct work_request_hdr *)d;
1035 memcpy(&to[1], &from[1], len - sizeof(*from));
1036 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1037 V_WR_BCNTLFLT(len & 7));
1039 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1040 V_WR_LEN((len + 7) / 8));
1046 * check_desc_avail - check descriptor availability on a send queue
1047 * @adap: the adapter
1048 * @q: the send queue
1049 * @skb: the packet needing the descriptors
1050 * @ndesc: the number of Tx descriptors needed
1051 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1053 * Checks if the requested number of Tx descriptors is available on an
1054 * SGE send queue. If the queue is already suspended or not enough
1055 * descriptors are available the packet is queued for later transmission.
1056 * Must be called with the Tx queue locked.
1058 * Returns 0 if enough descriptors are available, 1 if there aren't
1059 * enough descriptors and the packet has been queued, and 2 if the caller
1060 * needs to retry because there weren't enough descriptors at the
1061 * beginning of the call but some freed up in the meantime.
1063 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1064 struct sk_buff *skb, unsigned int ndesc,
1067 if (unlikely(!skb_queue_empty(&q->sendq))) {
1068 addq_exit:__skb_queue_tail(&q->sendq, skb);
1071 if (unlikely(q->size - q->in_use < ndesc)) {
1072 struct sge_qset *qs = txq_to_qset(q, qid);
1074 set_bit(qid, &qs->txq_stopped);
1075 smp_mb__after_clear_bit();
1077 if (should_restart_tx(q) &&
1078 test_and_clear_bit(qid, &qs->txq_stopped))
1088 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1089 * @q: the SGE control Tx queue
1091 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1092 * that send only immediate data (presently just the control queues) and
1093 * thus do not have any sk_buffs to release.
1095 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1097 unsigned int reclaim = q->processed - q->cleaned;
1099 q->in_use -= reclaim;
1100 q->cleaned += reclaim;
1103 static inline int immediate(const struct sk_buff *skb)
1105 return skb->len <= WR_LEN && !skb->data_len;
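/* i.e., the packet fits entirely in one work request and is all linear data
 * (no page fragments), so it can be sent as immediate data. */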
1109 * ctrl_xmit - send a packet through an SGE control Tx queue
1110 * @adap: the adapter
1111 * @q: the control queue
1114 * Send a packet through an SGE control Tx queue. Packets sent through
1115 * a control queue must fit entirely as immediate data in a single Tx
1116 * descriptor and have no page fragments.
1118 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1119 struct sk_buff *skb)
1122 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1124 if (unlikely(!immediate(skb))) {
1127 return NET_XMIT_SUCCESS;
1130 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1131 wrp->wr_lo = htonl(V_WR_TID(q->token));
1133 spin_lock(&q->lock);
1134 again:reclaim_completed_tx_imm(q);
1136 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1137 if (unlikely(ret)) {
1139 spin_unlock(&q->lock);
1145 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1148 if (++q->pidx >= q->size) {
1152 spin_unlock(&q->lock);
1154 t3_write_reg(adap, A_SG_KDOORBELL,
1155 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1156 return NET_XMIT_SUCCESS;
1160 * restart_ctrlq - restart a suspended control queue
1161 * @qs: the queue set containing the control queue
1163 * Resumes transmission on a suspended Tx control queue.
1165 static void restart_ctrlq(unsigned long data)
1167 struct sk_buff *skb;
1168 struct sge_qset *qs = (struct sge_qset *)data;
1169 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1170 struct adapter *adap = qs->netdev->priv;
1172 spin_lock(&q->lock);
1173 again:reclaim_completed_tx_imm(q);
1175 while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
1177 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1179 if (++q->pidx >= q->size) {
1186 if (!skb_queue_empty(&q->sendq)) {
1187 set_bit(TXQ_CTRL, &qs->txq_stopped);
1188 smp_mb__after_clear_bit();
1190 if (should_restart_tx(q) &&
1191 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1196 spin_unlock(&q->lock);
1197 t3_write_reg(adap, A_SG_KDOORBELL,
1198 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1202 * write_ofld_wr - write an offload work request
1203 * @adap: the adapter
1204 * @skb: the packet to send
1206 * @pidx: index of the first Tx descriptor to write
1207 * @gen: the generation value to use
1208 * @ndesc: number of descriptors the packet will occupy
1210 * Write an offload work request to send the supplied packet. The packet
1211 * data already carry the work request with most fields populated.
1213 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1214 struct sge_txq *q, unsigned int pidx,
1215 unsigned int gen, unsigned int ndesc)
1217 unsigned int sgl_flits, flits;
1218 struct work_request_hdr *from;
1219 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1220 struct tx_desc *d = &q->desc[pidx];
1222 if (immediate(skb)) {
1223 q->sdesc[pidx].skb = NULL;
1224 write_imm(d, skb, skb->len, gen);
1228 /* Only TX_DATA builds SGLs */
1230 from = (struct work_request_hdr *)skb->data;
1231 memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));
1233 flits = (skb->h.raw - skb->data) / 8;
1234 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1235 sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
1237 if (need_skb_unmap())
1238 ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
1240 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1241 gen, from->wr_hi, from->wr_lo);
1245 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1248 * Returns the number of Tx descriptors needed for the given offload
1249 * packet. These packets are already fully constructed.
1251 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1253 unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;
1255 if (skb->len <= WR_LEN && cnt == 0)
1256 return 1; /* packet fits as immediate data */
1258 flits = (skb->h.raw - skb->data) / 8; /* headers */
1259 if (skb->tail != skb->h.raw)
1261 return flits_to_desc(flits + sgl_len(cnt));
1265 * ofld_xmit - send a packet through an offload queue
1266 * @adap: the adapter
1267 * @q: the Tx offload queue
1270 * Send an offload packet through an SGE offload queue.
1272 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1273 struct sk_buff *skb)
1276 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1278 spin_lock(&q->lock);
1279 again:reclaim_completed_tx(adap, q);
1281 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1282 if (unlikely(ret)) {
1284 skb->priority = ndesc; /* save for restart */
1285 spin_unlock(&q->lock);
1295 if (q->pidx >= q->size) {
1299 spin_unlock(&q->lock);
1301 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1302 check_ring_tx_db(adap, q);
1303 return NET_XMIT_SUCCESS;
1307 * restart_offloadq - restart a suspended offload queue
1308 * @qs: the queue set containing the offload queue
1310 * Resumes transmission on a suspended Tx offload queue.
1312 static void restart_offloadq(unsigned long data)
1314 struct sk_buff *skb;
1315 struct sge_qset *qs = (struct sge_qset *)data;
1316 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1317 struct adapter *adap = qs->netdev->priv;
1319 spin_lock(&q->lock);
1320 again:reclaim_completed_tx(adap, q);
1322 while ((skb = skb_peek(&q->sendq)) != NULL) {
1323 unsigned int gen, pidx;
1324 unsigned int ndesc = skb->priority;
1326 if (unlikely(q->size - q->in_use < ndesc)) {
1327 set_bit(TXQ_OFLD, &qs->txq_stopped);
1328 smp_mb__after_clear_bit();
1330 if (should_restart_tx(q) &&
1331 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1341 if (q->pidx >= q->size) {
1345 __skb_unlink(skb, &q->sendq);
1346 spin_unlock(&q->lock);
1348 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1349 spin_lock(&q->lock);
1351 spin_unlock(&q->lock);
1354 set_bit(TXQ_RUNNING, &q->flags);
1355 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1357 t3_write_reg(adap, A_SG_KDOORBELL,
1358 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1362 * queue_set - return the queue set a packet should use
1365 * Maps a packet to the SGE queue set it should use. The desired queue
1366 * set is carried in bits 1-3 in the packet's priority.
1368 static inline int queue_set(const struct sk_buff *skb)
1370 return skb->priority >> 1;
1374 * is_ctrl_pkt - return whether an offload packet is a control packet
1377 * Determines whether an offload packet should use an OFLD or a CTRL
1378 * Tx queue. This is indicated by bit 0 in the packet's priority.
1380 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1382 return skb->priority & 1;
1386 * t3_offload_tx - send an offload packet
1387 * @tdev: the offload device to send to
1390 * Sends an offload packet. We use the packet priority to select the
1391 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1392 * should be sent as regular or control, bits 1-3 select the queue set.
1394 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1396 struct adapter *adap = tdev2adap(tdev);
1397 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1399 if (unlikely(is_ctrl_pkt(skb)))
1400 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1402 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1406 * offload_enqueue - add an offload packet to an SGE offload receive queue
1407 * @q: the SGE response queue
1410 * Add a new offload packet to an SGE response queue's offload packet
1411 * queue. If the packet is the first on the queue it schedules the RX
1412 * softirq to process the queue.
1414 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1416 skb->next = skb->prev = NULL;
1418 q->rx_tail->next = skb;
1420 struct sge_qset *qs = rspq_to_qset(q);
1422 if (__netif_rx_schedule_prep(qs->netdev))
1423 __netif_rx_schedule(qs->netdev);
1430 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1431 * @tdev: the offload device that will be receiving the packets
1432 * @q: the SGE response queue that assembled the bundle
1433 * @skbs: the partial bundle
1434 * @n: the number of packets in the bundle
1436 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1438 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1440 struct sk_buff *skbs[], int n)
1443 q->offload_bundles++;
1444 tdev->recv(tdev, skbs, n);
1449 * ofld_poll - NAPI handler for offload packets in interrupt mode
1450 * @dev: the network device doing the polling
1451 * @budget: polling budget
1453 * The NAPI handler for offload packets when a response queue is serviced
1454 * by the hard interrupt handler, i.e., when it's operating in non-polling
1455 * mode. Creates small packet batches and sends them through the offload
1456 * receive handler. Batches need to be of modest size as we do prefetches
1457 * on the packets in each.
1459 static int ofld_poll(struct net_device *dev, int *budget)
1461 struct adapter *adapter = dev->priv;
1462 struct sge_qset *qs = dev2qset(dev);
1463 struct sge_rspq *q = &qs->rspq;
1464 int work_done, limit = min(*budget, dev->quota), avail = limit;
1467 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1470 spin_lock_irq(&q->lock);
1473 work_done = limit - avail;
1474 *budget -= work_done;
1475 dev->quota -= work_done;
1476 __netif_rx_complete(dev);
1477 spin_unlock_irq(&q->lock);
1482 q->rx_head = q->rx_tail = NULL;
1483 spin_unlock_irq(&q->lock);
1485 for (ngathered = 0; avail && head; avail--) {
1486 prefetch(head->data);
1487 skbs[ngathered] = head;
1489 skbs[ngathered]->next = NULL;
1490 if (++ngathered == RX_BUNDLE_SIZE) {
1491 q->offload_bundles++;
1492 adapter->tdev.recv(&adapter->tdev, skbs,
1497 if (head) { /* splice remaining packets back onto Rx queue */
1498 spin_lock_irq(&q->lock);
1499 tail->next = q->rx_head;
1503 spin_unlock_irq(&q->lock);
1505 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1507 work_done = limit - avail;
1508 *budget -= work_done;
1509 dev->quota -= work_done;
1514 * rx_offload - process a received offload packet
1515 * @tdev: the offload device receiving the packet
1516 * @rq: the response queue that received the packet
1518 * @rx_gather: a gather list of packets if we are building a bundle
1519 * @gather_idx: index of the next available slot in the bundle
1521 * Process an ingress offload packet and add it to the offload ingress
1522 * queue. Returns the index of the next available slot in the bundle.
1524 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1525 struct sk_buff *skb, struct sk_buff *rx_gather[],
1526 unsigned int gather_idx)
1529 skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data;
1532 rx_gather[gather_idx++] = skb;
1533 if (gather_idx == RX_BUNDLE_SIZE) {
1534 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1536 rq->offload_bundles++;
1539 offload_enqueue(rq, skb);
1545 * update_tx_completed - update the number of processed Tx descriptors
1546 * @qs: the queue set to update
1547 * @idx: which Tx queue within the set to update
1548 * @credits: number of new processed descriptors
1549 * @tx_completed: accumulates credits for the queues
1551 * Updates the number of completed Tx descriptors for a queue set's Tx
1552 * queue. On UP systems we update the information immediately but on
1553 * MP we accumulate the credits locally and update the Tx queue when we
1554 * reach a threshold to avoid cache-line bouncing.
1556 static inline void update_tx_completed(struct sge_qset *qs, int idx,
1557 unsigned int credits,
1558 unsigned int tx_completed[])
1561 tx_completed[idx] += credits;
1562 if (tx_completed[idx] > 32) {
1563 qs->txq[idx].processed += tx_completed[idx];
1564 tx_completed[idx] = 0;
1567 qs->txq[idx].processed += credits;
1572 * restart_tx - check whether to restart suspended Tx queues
1573 * @qs: the queue set to resume
1575 * Restarts suspended Tx queues of an SGE queue set if they have enough
1576 * free resources to resume operation.
1578 static void restart_tx(struct sge_qset *qs)
1580 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1581 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1582 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1583 qs->txq[TXQ_ETH].restarts++;
1584 if (netif_running(qs->netdev))
1585 netif_wake_queue(qs->netdev);
1588 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1589 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1590 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1591 qs->txq[TXQ_OFLD].restarts++;
1592 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1594 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1595 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1596 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1597 qs->txq[TXQ_CTRL].restarts++;
1598 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1603 * rx_eth - process an ingress ethernet packet
1604 * @adap: the adapter
1605 * @rq: the response queue that received the packet
1607 * @pad: amount of padding at the start of the buffer
1609 * Process an ingress Ethernet packet and deliver it to the stack.
1610 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1611 * if it was immediate data in a response.
1613 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1614 struct sk_buff *skb, int pad)
1616 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1617 struct port_info *pi;
1620 skb_pull(skb, sizeof(*p) + pad);
1621 skb->dev = adap->port[p->iff];
1622 skb->dev->last_rx = jiffies;
1623 skb->protocol = eth_type_trans(skb, skb->dev);
1624 pi = netdev_priv(skb->dev);
1625 if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
1627 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1628 skb->ip_summed = CHECKSUM_UNNECESSARY;
1630 skb->ip_summed = CHECKSUM_NONE;
1632 if (unlikely(p->vlan_valid)) {
1633 struct vlan_group *grp = pi->vlan_grp;
1635 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1637 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1640 dev_kfree_skb_any(skb);
1641 } else if (rq->polling)
1642 netif_receive_skb(skb);
1648 * handle_rsp_cntrl_info - handles control information in a response
1649 * @qs: the queue set corresponding to the response
1650 * @flags: the response control flags
1651 * @tx_completed: accumulates completion credits for the Tx queues
1653 * Handles the control information of an SGE response, such as GTS
1654 * indications and completion credits for the queue set's Tx queues.
1656 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags,
1657 unsigned int tx_completed[])
1659 unsigned int credits;
1662 if (flags & F_RSPD_TXQ0_GTS)
1663 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1666 /* ETH credits are already coalesced, return them immediately. */
1667 credits = G_RSPD_TXQ0_CR(flags);
1669 qs->txq[TXQ_ETH].processed += credits;
1672 if (flags & F_RSPD_TXQ1_GTS)
1673 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1675 update_tx_completed(qs, TXQ_OFLD, G_RSPD_TXQ1_CR(flags), tx_completed);
1676 update_tx_completed(qs, TXQ_CTRL, G_RSPD_TXQ2_CR(flags), tx_completed);
1680 * flush_tx_completed - returns accumulated Tx completions to Tx queues
1681 * @qs: the queue set to update
1682 * @tx_completed: pending completion credits to return to Tx queues
1684 * Updates the number of completed Tx descriptors for a queue set's Tx
1685 * queues with the credits pending in @tx_completed. This does something
1686 * only on MP systems as on UP systems we return the credits immediately.
1688 static inline void flush_tx_completed(struct sge_qset *qs,
1689 unsigned int tx_completed[])
1691 #if defined(CONFIG_SMP)
1692 if (tx_completed[TXQ_OFLD])
1693 qs->txq[TXQ_OFLD].processed += tx_completed[TXQ_OFLD];
1694 if (tx_completed[TXQ_CTRL])
1695 qs->txq[TXQ_CTRL].processed += tx_completed[TXQ_CTRL];
1700 * check_ring_db - check if we need to ring any doorbells
1701 * @adapter: the adapter
1702 * @qs: the queue set whose Tx queues are to be examined
1703 * @sleeping: indicates which Tx queue sent GTS
1705 * Checks if some of a queue set's Tx queues need to ring their doorbells
1706 * to resume transmission after idling while they still have unprocessed descriptors.
1709 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1710 unsigned int sleeping)
1712 if (sleeping & F_RSPD_TXQ0_GTS) {
1713 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1715 if (txq->cleaned + txq->in_use != txq->processed &&
1716 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1717 set_bit(TXQ_RUNNING, &txq->flags);
1718 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1719 V_EGRCNTX(txq->cntxt_id));
1723 if (sleeping & F_RSPD_TXQ1_GTS) {
1724 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1726 if (txq->cleaned + txq->in_use != txq->processed &&
1727 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1728 set_bit(TXQ_RUNNING, &txq->flags);
1729 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1730 V_EGRCNTX(txq->cntxt_id));
1736 * is_new_response - check if a response is newly written
1737 * @r: the response descriptor
1738 * @q: the response queue
1740 * Returns true if a response descriptor contains a yet unprocessed response.
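 * The SGE toggles the generation bit each time it wraps the response ring,
 * so a descriptor whose generation matches the queue's current generation
 * has been written since the driver last passed this index.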
1743 static inline int is_new_response(const struct rsp_desc *r,
1744 const struct sge_rspq *q)
1746 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1749 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1750 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1751 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1752 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1753 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1755 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
1756 #define NOMEM_INTR_DELAY 2500
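/* 2500 * 0.1 us = 250 us */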
1759 * process_responses - process responses from an SGE response queue
1760 * @adap: the adapter
1761 * @qs: the queue set to which the response queue belongs
1762 * @budget: how many responses can be processed in this round
1764 * Process responses from an SGE response queue up to the supplied budget.
1765 * Responses include received packets as well as credits and other events
1766 * for the queues that belong to the response queue's queue set.
1767 * A negative budget is effectively unlimited.
1769 * Additionally choose the interrupt holdoff time for the next interrupt
1770 * on this queue. If the system is under memory shortage use a fairly
1771 * long delay to help recovery.
1773 static int process_responses(struct adapter *adap, struct sge_qset *qs,
1776 struct sge_rspq *q = &qs->rspq;
1777 struct rsp_desc *r = &q->desc[q->cidx];
1778 int budget_left = budget;
1779 unsigned int sleeping = 0, tx_completed[3] = { 0, 0, 0 };
1780 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1783 q->next_holdoff = q->holdoff_tmr;
1785 while (likely(budget_left && is_new_response(r, q))) {
1786 int eth, ethpad = 0;
1787 struct sk_buff *skb = NULL;
1788 u32 len, flags = ntohl(r->flags);
1789 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
1791 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1793 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1794 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1798 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
1799 skb->data[0] = CPL_ASYNC_NOTIF;
1800 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
1802 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1803 skb = get_imm_packet(r);
1804 if (unlikely(!skb)) {
1806 q->next_holdoff = NOMEM_INTR_DELAY;
1808 /* consume one credit since we tried */
1813 } else if ((len = ntohl(r->len_cq)) != 0) {
1816 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1818 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1819 eth ? SGE_RX_DROP_THRES : 0);
1822 else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
1825 if (++fl->cidx == fl->size)
1830 if (flags & RSPD_CTRL_MASK) {
1831 sleeping |= flags & RSPD_GTS_MASK;
1832 handle_rsp_cntrl_info(qs, flags, tx_completed);
1836 if (unlikely(++q->cidx == q->size)) {
1843 if (++q->credits >= (q->size / 4)) {
1844 refill_rspq(adap, q, q->credits);
1848 if (likely(skb != NULL)) {
1850 rx_eth(adap, q, skb, ethpad);
1852 /* Preserve the RSS info in csum & priority */
1854 skb->priority = rss_lo;
1855 ngathered = rx_offload(&adap->tdev, q, skb,
1856 offload_skbs, ngathered);
1863 flush_tx_completed(qs, tx_completed);
1864 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
1866 check_ring_db(adap, qs, sleeping);
1868 smp_mb(); /* commit Tx queue .processed updates */
1869 if (unlikely(qs->txq_stopped != 0))
1872 budget -= budget_left;
1876 static inline int is_pure_response(const struct rsp_desc *r)
1878 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
1880 return (n | r->len_cq) == 0;
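/*
 * A pure response carries no packet data: no async notification, no
 * immediate data, and a zero length/completion-queue field.
 */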
1884 * napi_rx_handler - the NAPI handler for Rx processing
1885 * @dev: the net device
1886 * @budget: how many packets we can process in this round
1888 * Handler for new data events when using NAPI.
1890 static int napi_rx_handler(struct net_device *dev, int *budget)
1892 struct adapter *adap = dev->priv;
1893 struct sge_qset *qs = dev2qset(dev);
1894 int effective_budget = min(*budget, dev->quota);
1896 int work_done = process_responses(adap, qs, effective_budget);
1897 *budget -= work_done;
1898 dev->quota -= work_done;
1900 if (work_done >= effective_budget)
1903 netif_rx_complete(dev);
1906 * Because we don't atomically flush the following write it is
1907 * possible that in very rare cases it can reach the device in a way
1908 * that races with a new response being written plus an error interrupt
1909 * causing the NAPI interrupt handler below to return unhandled status
1910 * to the OS. To protect against this would require flushing the write
1911 * and doing both the write and the flush with interrupts off. Way too
1912 * expensive and unjustifiable given the rarity of the race.
1914 * The race cannot happen at all with MSI-X.
1916 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
1917 V_NEWTIMER(qs->rspq.next_holdoff) |
1918 V_NEWINDEX(qs->rspq.cidx));
1923 * Returns true if the device is already scheduled for polling.
1925 static inline int napi_is_scheduled(struct net_device *dev)
1927 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
1931 * process_pure_responses - process pure responses from a response queue
1932 * @adap: the adapter
1933 * @qs: the queue set owning the response queue
1934 * @r: the first pure response to process
1936 * A simpler version of process_responses() that handles only pure (i.e.,
1937 * non data-carrying) responses. Such responses are too lightweight to
1938 * justify calling a softirq under NAPI, so we handle them specially in
1939 * the interrupt handler. The function is called with a pointer to a
1940 * response, which the caller must ensure is a valid pure response.
1942 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
1944 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
1947 struct sge_rspq *q = &qs->rspq;
1948 unsigned int sleeping = 0, tx_completed[3] = { 0, 0, 0 };
1951 u32 flags = ntohl(r->flags);
1954 if (unlikely(++q->cidx == q->size)) {
1961 if (flags & RSPD_CTRL_MASK) {
1962 sleeping |= flags & RSPD_GTS_MASK;
1963 handle_rsp_cntrl_info(qs, flags, tx_completed);
1967 if (++q->credits >= (q->size / 4)) {
1968 refill_rspq(adap, q, q->credits);
1971 } while (is_new_response(r, q) && is_pure_response(r));
1973 flush_tx_completed(qs, tx_completed);
1976 check_ring_db(adap, qs, sleeping);
1978 smp_mb(); /* commit Tx queue .processed updates */
1979 if (unlikely(qs->txq_stopped != 0))
1982 return is_new_response(r, q);
1986 * handle_responses - decide what to do with new responses in NAPI mode
1987 * @adap: the adapter
1988 * @q: the response queue
1990 * This is used by the NAPI interrupt handlers to decide what to do with
1991 * new SGE responses. If there are no new responses it returns -1. If
1992 * there are new responses and they are pure (i.e., non-data carrying)
1993 * it handles them straight in hard interrupt context as they are very
1994 * cheap and don't deliver any packets. Finally, if there are any data
1995 * signaling responses it schedules the NAPI handler. Returns 1 if it
1996 * schedules NAPI, 0 if all new responses were pure.
1998 * The caller must ascertain NAPI is not already running.
2000 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2002 struct sge_qset *qs = rspq_to_qset(q);
2003 struct rsp_desc *r = &q->desc[q->cidx];
2005 if (!is_new_response(r, q))
2007 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2008 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2009 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2012 if (likely(__netif_rx_schedule_prep(qs->netdev)))
2013 __netif_rx_schedule(qs->netdev);
2018 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2019 * (i.e., response queue serviced in hard interrupt).
2021 irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2023 struct sge_qset *qs = cookie;
2024 struct adapter *adap = qs->netdev->priv;
2025 struct sge_rspq *q = &qs->rspq;
2027 spin_lock(&q->lock);
2028 if (process_responses(adap, qs, -1) == 0)
2029 q->unhandled_irqs++;
2030 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2031 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2032 spin_unlock(&q->lock);
2037 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2038 * (i.e., response queue serviced by NAPI polling).
2040 irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2042 struct sge_qset *qs = cookie;
2043 struct adapter *adap = qs->netdev->priv;
2044 struct sge_rspq *q = &qs->rspq;
2046 spin_lock(&q->lock);
2047 BUG_ON(napi_is_scheduled(qs->netdev));
2049 if (handle_responses(adap, q) < 0)
2050 q->unhandled_irqs++;
2051 spin_unlock(&q->lock);
2056 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2057 * SGE response queues as well as error and other async events as they all use
2058 * the same MSI vector. We use one SGE response queue per port in this mode
2059 * and protect all response queues with queue 0's lock.
2061 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2063 int new_packets = 0;
2064 struct adapter *adap = cookie;
2065 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2067 spin_lock(&q->lock);
2069 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2070 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2071 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2075 if (adap->params.nports == 2 &&
2076 process_responses(adap, &adap->sge.qs[1], -1)) {
2077 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2079 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2080 V_NEWTIMER(q1->next_holdoff) |
2081 V_NEWINDEX(q1->cidx));
2085 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2086 q->unhandled_irqs++;
2088 spin_unlock(&q->lock);
2092 static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
2094 if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
2095 if (likely(__netif_rx_schedule_prep(dev)))
2096 __netif_rx_schedule(dev);
2103 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2104 * by NAPI polling). Handles data events from SGE response queues as well as
2105 * error and other async events as they all use the same MSI vector. We use
2106 * one SGE response queue per port in this mode and protect all response
2107 * queues with queue 0's lock.
2109 irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2112 struct adapter *adap = cookie;
2113 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2115 spin_lock(&q->lock);
2117 new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
2118 if (adap->params.nports == 2)
2119 new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
2120 &adap->sge.qs[1].rspq);
2121 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2122 q->unhandled_irqs++;
2124 spin_unlock(&q->lock);
2129 * A helper function that processes responses and issues GTS.
2131 static inline int process_responses_gts(struct adapter *adap,
2132 struct sge_rspq *rq)
2136 work = process_responses(adap, rspq_to_qset(rq), -1);
2137 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2138 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2143 * The legacy INTx interrupt handler. This needs to handle data events from
2144 * SGE response queues as well as error and other async events as they all use
2145 * the same interrupt pin. We use one SGE response queue per port in this mode
2146 * and protect all response queues with queue 0's lock.
2148 static irqreturn_t t3_intr(int irq, void *cookie)
2150 int work_done, w0, w1;
2151 struct adapter *adap = cookie;
2152 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2153 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2155 spin_lock(&q0->lock);
2157 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2158 w1 = adap->params.nports == 2 &&
2159 is_new_response(&q1->desc[q1->cidx], q1);
2161 if (likely(w0 | w1)) {
2162 t3_write_reg(adap, A_PL_CLI, 0);
2163 t3_read_reg(adap, A_PL_CLI); /* flush */
2166 process_responses_gts(adap, q0);
2169 process_responses_gts(adap, q1);
2171 work_done = w0 | w1;
2173 work_done = t3_slow_intr_handler(adap);
2175 spin_unlock(&q0->lock);
2176 return IRQ_RETVAL(work_done != 0);
2180 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2181 * Handles data events from SGE response queues as well as error and other
2182 * async events as they all use the same interrupt pin. We use one SGE
2183 * response queue per port in this mode and protect all response queues with queue 0's lock.
2186 static irqreturn_t t3b_intr(int irq, void *cookie)
2189 struct adapter *adap = cookie;
2190 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2192 t3_write_reg(adap, A_PL_CLI, 0);
2193 map = t3_read_reg(adap, A_SG_DATA_INTR);
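	/*
	 * Bit layout inferred from the checks below: bit 0 flags new
	 * responses on queue set 0, bit 1 on queue set 1, and F_ERRINTR
	 * flags an error/async event that is handed to the slow path.
	 */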
2195 if (unlikely(!map)) /* shared interrupt, most likely */
2198 spin_lock(&q0->lock);
2200 if (unlikely(map & F_ERRINTR))
2201 t3_slow_intr_handler(adap);
2203 if (likely(map & 1))
2204 process_responses_gts(adap, q0);
2207 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2209 spin_unlock(&q0->lock);
2214 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2215 * Handles data events from SGE response queues as well as error and other
2216 * async events as they all use the same interrupt pin. We use one SGE
2217 * response queue per port in this mode and protect all response queues with queue 0's lock.
2220 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2223 struct net_device *dev;
2224 struct adapter *adap = cookie;
2225 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2227 t3_write_reg(adap, A_PL_CLI, 0);
2228 map = t3_read_reg(adap, A_SG_DATA_INTR);
2230 if (unlikely(!map)) /* shared interrupt, most likely */
2233 spin_lock(&q0->lock);
2235 if (unlikely(map & F_ERRINTR))
2236 t3_slow_intr_handler(adap);
2238 if (likely(map & 1)) {
2239 dev = adap->sge.qs[0].netdev;
2241 BUG_ON(napi_is_scheduled(dev));
2242 if (likely(__netif_rx_schedule_prep(dev)))
2243 __netif_rx_schedule(dev);
2246 dev = adap->sge.qs[1].netdev;
2248 BUG_ON(napi_is_scheduled(dev));
2249 if (likely(__netif_rx_schedule_prep(dev)))
2250 __netif_rx_schedule(dev);
2253 spin_unlock(&q0->lock);
2258 * t3_intr_handler - select the top-level interrupt handler
2259 * @adap: the adapter
2260 * @polling: whether using NAPI to service response queues
2262 * Selects the top-level interrupt handler based on the type of interrupts
2263 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the response queues.
2266 intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
2268 if (adap->flags & USING_MSIX)
2269 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2270 if (adap->flags & USING_MSI)
2271 return polling ? t3_intr_msi_napi : t3_intr_msi;
2272 if (adap->params.rev > 0)
2273 return polling ? t3b_intr_napi : t3b_intr;
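/*
 * Illustrative sketch (not part of this file, exact names assumed): the
 * selected handler is registered with request_irq().  Note the cookie
 * convention used above: the MSI-X per-queue handlers expect a struct
 * sge_qset pointer, while the MSI and legacy INTx handlers expect the
 * struct adapter itself.
 *
 *	err = request_irq(adap->pdev->irq,
 *			  t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
 *			  IRQF_SHARED, adap->name, adap);
 */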
2278 * t3_sge_err_intr_handler - SGE async event interrupt handler
2279 * @adapter: the adapter
2281 * Interrupt handler for SGE asynchronous (non-data) events.
2283 void t3_sge_err_intr_handler(struct adapter *adapter)
2285 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2287 if (status & F_RSPQCREDITOVERFOW)
2288 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2290 if (status & F_RSPQDISABLED) {
2291 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2294 "packet delivered to disabled response queue "
2295 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2298 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2299 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
2300 t3_fatal_err(adapter);
2304 * sge_timer_cb - perform periodic maintenance of an SGE qset
2305 * @data: the SGE queue set to maintain
2307 * Runs periodically from a timer to perform maintenance of an SGE queue
2308 * set. It performs two tasks:
2310 * a) Cleans up any completed Tx descriptors that may still be pending.
2311 * Normal descriptor cleanup happens when new packets are added to a Tx
2312 * queue so this timer is relatively infrequent and does any cleanup only
2313 * if the Tx queue has not seen any new packets in a while. We make a
2314 * best-effort attempt to reclaim descriptors, in that we don't wait
2315 * around if we cannot get a queue's lock (which most likely is because
2316 * someone else is queueing new packets and so will also handle the clean
2317 * up). Since control queues use immediate data exclusively we don't
2318 * bother cleaning them up here.
2320 * b) Replenishes Rx queues that have run out due to memory shortage.
2321 * Normally new Rx buffers are added when existing ones are consumed but
2322 * when out of memory a queue can become empty. We try to add only a few
2323 * buffers here, the queue will be replenished fully as these new buffers
2324 * are used up if memory shortage has subsided.
2326 static void sge_timer_cb(unsigned long data)
2329 struct sge_qset *qs = (struct sge_qset *)data;
2330 struct adapter *adap = qs->netdev->priv;
2332 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2333 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2334 spin_unlock(&qs->txq[TXQ_ETH].lock);
2336 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2337 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2338 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2340 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2341 &adap->sge.qs[0].rspq.lock;
2342 if (spin_trylock_irq(lock)) {
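		/*
		 * Refill only when NAPI is not scheduled for this queue set;
		 * if it is, the poll routine is presumably already processing
		 * responses and will replenish the free lists itself.
		 */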
2343 if (!napi_is_scheduled(qs->netdev)) {
2344 if (qs->fl[0].credits < qs->fl[0].size)
2345 __refill_fl(adap, &qs->fl[0]);
2346 if (qs->fl[1].credits < qs->fl[1].size)
2347 __refill_fl(adap, &qs->fl[1]);
2349 spin_unlock_irq(lock);
2351 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2355 * t3_update_qset_coalesce - update coalescing settings for a queue set
2356 * @qs: the SGE queue set
2357 * @p: new queue set parameters
2359 * Update the coalescing settings for an SGE queue set. Nothing is done
2360 * if the queue set is not initialized yet.
2362 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2367 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
2368 qs->rspq.polling = p->polling;
2369 qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
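/*
 * Example of the conversion above: assuming the SGE timer tick is programmed
 * to 0.1 us in t3_sge_init(), coalesce_usecs = 5 yields holdoff_tmr = 50
 * ticks; the max() keeps the value at least 1 because 0 is not a valid
 * holdoff setting.
 */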
2373 * t3_sge_alloc_qset - initialize an SGE queue set
2374 * @adapter: the adapter
2375 * @id: the queue set id
2376 * @nports: how many Ethernet ports will be using this queue set
2377 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2378 * @p: configuration parameters for this queue set
2379 * @ntxq: number of Tx queues for the queue set
2380 * @netdev: net device associated with this queue set
2382 * Allocate resources and initialize an SGE queue set. A queue set
2383 * comprises a response queue, two Rx free-buffer queues, and up to 3
2384 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2385 * queue, offload queue, and control queue.
2387 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2388 int irq_vec_idx, const struct qset_params *p,
2389 int ntxq, struct net_device *netdev)
2391 int i, ret = -ENOMEM;
2392 struct sge_qset *q = &adapter->sge.qs[id];
2394 init_qset_cntxt(q, id);
2395 init_timer(&q->tx_reclaim_timer);
2396 q->tx_reclaim_timer.data = (unsigned long)q;
2397 q->tx_reclaim_timer.function = sge_timer_cb;
2399 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2400 sizeof(struct rx_desc),
2401 sizeof(struct rx_sw_desc),
2402 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2406 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2407 sizeof(struct rx_desc),
2408 sizeof(struct rx_sw_desc),
2409 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2413 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2414 sizeof(struct rsp_desc), 0,
2415 &q->rspq.phys_addr, NULL);
2419 for (i = 0; i < ntxq; ++i) {
2421 * The control queue always uses immediate data so does not
2422 * need to keep track of any sk_buffs.
2424 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2426 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2427 sizeof(struct tx_desc), sz,
2428 &q->txq[i].phys_addr,
2430 if (!q->txq[i].desc)
2434 q->txq[i].size = p->txq_size[i];
2435 spin_lock_init(&q->txq[i].lock);
2436 skb_queue_head_init(&q->txq[i].sendq);
2439 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2441 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2444 q->fl[0].gen = q->fl[1].gen = 1;
2445 q->fl[0].size = p->fl_size;
2446 q->fl[1].size = p->jumbo_size;
2449 q->rspq.size = p->rspq_size;
2450 spin_lock_init(&q->rspq.lock);
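	/*
	 * Stop the Ethernet Tx queue when fewer descriptors remain than the
	 * worst case needed for one packet per port: an SGL covering the
	 * linear part plus MAX_SKB_FRAGS page fragments, plus (apparently)
	 * 3 flits of WR/CPL headers, converted to descriptors.
	 */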
2452 q->txq[TXQ_ETH].stop_thres = nports *
2453 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2456 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
2457 sizeof(struct cpl_rx_pkt);
2458 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
2459 sizeof(struct cpl_rx_pkt);
2461 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
2462 sizeof(struct cpl_rx_data);
2463 q->fl[1].buf_size = (16 * 1024) -
2464 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2467 spin_lock(&adapter->sge.reg_lock);
2469 /* FL threshold comparison uses < */
2470 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2471 q->rspq.phys_addr, q->rspq.size,
2472 q->fl[0].buf_size, 1, 0);
2476 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2477 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2478 q->fl[i].phys_addr, q->fl[i].size,
2479 q->fl[i].buf_size, p->cong_thres, 1,
2485 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2486 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2487 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2493 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2494 USE_GTS, SGE_CNTXT_OFLD, id,
2495 q->txq[TXQ_OFLD].phys_addr,
2496 q->txq[TXQ_OFLD].size, 0, 1, 0);
2502 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2504 q->txq[TXQ_CTRL].phys_addr,
2505 q->txq[TXQ_CTRL].size,
2506 q->txq[TXQ_CTRL].token, 1, 0);
2511 spin_unlock(&adapter->sge.reg_lock);
2513 t3_update_qset_coalesce(q, p);
2516 * We use atalk_ptr as a backpointer to a qset. If a device is
2517 * associated with multiple queue sets, only the first one sets atalk_ptr.
2520 if (netdev->atalk_ptr == NULL)
2521 netdev->atalk_ptr = q;
2523 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2524 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2525 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2527 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2528 V_NEWTIMER(q->rspq.holdoff_tmr));
2530 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2534 spin_unlock(&adapter->sge.reg_lock);
2536 t3_free_qset(adapter, q);
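/*
 * Illustrative sketch (assumed caller, field and macro names not guaranteed):
 * a typical configuration allocates one queue set per port, e.g.
 *
 *	for_each_port(adap, i) {
 *		err = t3_sge_alloc_qset(adap, i, 1, i + 1,
 *					&adap->params.sge.qset[i],
 *					SGE_TXQ_PER_SET, adap->port[i]);
 *		if (err)
 *			goto out_free;
 *	}
 */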
2541 * t3_free_sge_resources - free SGE resources
2542 * @adap: the adapter
2544 * Frees resources used by the SGE queue sets.
2546 void t3_free_sge_resources(struct adapter *adap)
2550 for (i = 0; i < SGE_QSETS; ++i)
2551 t3_free_qset(adap, &adap->sge.qs[i]);
2555 * t3_sge_start - enable SGE
2556 * @adap: the adapter
2558 * Enables the SGE for DMAs. This is the last step in starting packet transfers.
2561 void t3_sge_start(struct adapter *adap)
2563 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2567 * t3_sge_stop - disable SGE operation
2568 * @adap: the adapter
2570 * Disables the DMA engine. This can be called in emergencies (e.g.,
2571 * from error interrupts) or from normal process context. In the latter
2572 * case it also disables any pending queue restart tasklets. Note that
2573 * if it is called in interrupt context it cannot disable the restart
2574 * tasklets as it cannot wait; however, the tasklets will have no effect
2575 * since the doorbells are disabled and the driver will call this again
2576 * later from process context, at which time the tasklets will be stopped
2577 * if they are still running.
2579 void t3_sge_stop(struct adapter *adap)
2581 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2582 if (!in_interrupt()) {
2585 for (i = 0; i < SGE_QSETS; ++i) {
2586 struct sge_qset *qs = &adap->sge.qs[i];
2588 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2589 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2595 * t3_sge_init - initialize SGE
2596 * @adap: the adapter
2597 * @p: the SGE parameters
2599 * Performs SGE initialization needed every time after a chip reset.
2600 * We do not initialize any of the queue sets here; instead, the driver
2601 * top-level must request those individually. We also do not enable DMA
2602 * here, that should be done after the queues have been set up.
2604 void t3_sge_init(struct adapter *adap, struct sge_params *p)
2606 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
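	/*
	 * ups encodes the size of BAR 2 (presumably the user-space doorbell
	 * region) as a power of two of 4KB pages: e.g. a 64KB BAR gives
	 * ups = ffs(65536 >> 12) = ffs(16) = 5, programmed below as
	 * V_USERSPACESIZE(4).
	 */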
2608 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2610 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2611 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2612 #if SGE_NUM_GENBITS == 1
2613 ctrl |= F_EGRGENCTRL;
2615 if (adap->params.rev > 0) {
2616 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2617 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2618 ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
2620 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2621 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2622 V_LORCQDRBTHRSH(512));
2623 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2624 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
2625 V_TIMEOUT(100 * core_ticks_per_usec(adap)));
2626 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
2627 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2628 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2629 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2630 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2631 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2635 * t3_sge_prep - one-time SGE initialization
2636 * @adap: the associated adapter
2637 * @p: SGE parameters
2639 * Performs one-time initialization of SGE SW state. Includes determining
2640 * defaults for the assorted SGE parameters, which admins can change until
2641 * they are used to initialize the SGE.
2643 void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2647 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2648 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2650 for (i = 0; i < SGE_QSETS; ++i) {
2651 struct qset_params *q = p->qset + i;
2653 q->polling = adap->params.rev > 0;
2654 q->coalesce_usecs = 5;
2655 q->rspq_size = 1024;
2657 q->jumbo_size = 512;
2658 q->txq_size[TXQ_ETH] = 1024;
2659 q->txq_size[TXQ_OFLD] = 1024;
2660 q->txq_size[TXQ_CTRL] = 256;
2664 spin_lock_init(&adap->sge.reg_lock);
2668 * t3_get_desc - dump an SGE descriptor for debugging purposes
2669 * @qs: the queue set
2670 * @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
2671 * @idx: the descriptor index in the queue
2672 * @data: where to dump the descriptor contents
2674 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2675 * size of the descriptor.
2677 int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2678 unsigned char *data)
2684 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2686 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2687 return sizeof(struct tx_desc);
2691 if (!qs->rspq.desc || idx >= qs->rspq.size)
2693 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2694 return sizeof(struct rsp_desc);
2698 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2700 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2701 return sizeof(struct rx_desc);
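/*
 * Illustrative sketch (assumed debug caller, not part of this file): dumping
 * descriptor 0 of the Ethernet Tx queue of queue set 0.
 *
 *	unsigned char buf[sizeof(struct tx_desc)];
 *	int len = t3_get_desc(&adap->sge.qs[0], 0, 0, buf);
 *
 * A positive return value is the number of valid bytes in buf; qnum 3 selects
 * the response queue and 4..5 the free-buffer queues, as documented above.
 */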