cxgb3 - Unmap offload packets when they are freed
[linux-2.6-block.git] / drivers / net / cxgb3 / sge.c
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/skbuff.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/dma-mapping.h>
39#include "common.h"
40#include "regs.h"
41#include "sge_defs.h"
42#include "t3_cpl.h"
43#include "firmware_exports.h"
44
45#define USE_GTS 0
46
47#define SGE_RX_SM_BUF_SIZE 1536
48#define SGE_RX_COPY_THRES 256
49
50# define SGE_RX_DROP_THRES 16
51
52/*
53 * Period of the Tx buffer reclaim timer. This timer does not need to run
54 * frequently as Tx buffers are usually reclaimed by new Tx packets.
55 */
56#define TX_RECLAIM_PERIOD (HZ / 4)
57
58/* WR size in bytes */
59#define WR_LEN (WR_FLITS * 8)
60
61/*
62 * Types of Tx queues in each queue set. Order here matters, do not change.
63 */
64enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
65
66/* Values for sge_txq.flags */
67enum {
68 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
69 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
70};
71
72struct tx_desc {
73 u64 flit[TX_DESC_FLITS];
74};
75
76struct rx_desc {
77 __be32 addr_lo;
78 __be32 len_gen;
79 __be32 gen2;
80 __be32 addr_hi;
81};
82
83struct tx_sw_desc { /* SW state per Tx descriptor */
84 struct sk_buff *skb;
85};
86
87struct rx_sw_desc { /* SW state per Rx descriptor */
88 struct sk_buff *skb;
89 DECLARE_PCI_UNMAP_ADDR(dma_addr);
90};
91
92struct rsp_desc { /* response queue descriptor */
93 struct rss_header rss_hdr;
94 __be32 flags;
95 __be32 len_cq;
96 u8 imm_data[47];
97 u8 intr_gen;
98};
99
100struct unmap_info { /* packet unmapping info, overlays skb->cb */
101 int sflit; /* start flit of first SGL entry in Tx descriptor */
102 u16 fragidx; /* first page fragment in current Tx descriptor */
103 u16 addr_idx; /* buffer index of first SGL entry in descriptor */
104 u32 len; /* mapped length of skb main body */
105};
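/*
 * Editorial note (not part of the original source): unmap_info overlays
 * skb->cb, a fixed-size scratch area (48 bytes in kernels of this vintage),
 * so the structure must stay within that size.  A hedged compile-time
 * check could look like:
 *
 *	BUILD_BUG_ON(sizeof(struct unmap_info) >
 *		     sizeof(((struct sk_buff *)0)->cb));
 */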
106
107/*
108 * Holds unmapping information for Tx packets that need deferred unmapping.
109 * This structure lives at skb->head and must be allocated by callers.
110 */
111struct deferred_unmap_info {
112 struct pci_dev *pdev;
113 dma_addr_t addr[MAX_SKB_FRAGS + 1];
114};
115
116/*
117 * Maps a number of flits to the number of Tx descriptors that can hold them.
118 * The formula is
119 *
120 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
121 *
122 * HW allows up to 4 descriptors to be combined into a WR.
123 */
124static u8 flit_desc_map[] = {
125 0,
126#if SGE_NUM_GENBITS == 1
127 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
128 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
129 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
130 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
131#elif SGE_NUM_GENBITS == 2
132 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
133 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
134 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
135 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
136#else
137# error "SGE_NUM_GENBITS must be 1 or 2"
138#endif
139};
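/*
 * Worked example of the formula above (illustrative, not in the original):
 * with SGE_NUM_GENBITS == 2 the table is consistent with WR_FLITS == 15,
 * so flits = 16 gives desc = 1 + (16 - 2) / (15 - 1) = 2, matching the
 * first "2" entry, while flits = 15 still fits in a single descriptor.
 */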
140
141static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
142{
143 return container_of(q, struct sge_qset, fl[qidx]);
144}
145
146static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
147{
148 return container_of(q, struct sge_qset, rspq);
149}
150
151static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
152{
153 return container_of(q, struct sge_qset, txq[qidx]);
154}
155
156/**
157 * refill_rspq - replenish an SGE response queue
158 * @adapter: the adapter
159 * @q: the response queue to replenish
160 * @credits: how many new responses to make available
161 *
162 * Replenishes a response queue by making the supplied number of responses
163 * available to HW.
164 */
165static inline void refill_rspq(struct adapter *adapter,
166 const struct sge_rspq *q, unsigned int credits)
167{
168 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
169 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
170}
171
172/**
173 * need_skb_unmap - does the platform need unmapping of sk_buffs?
174 *
175 * Returns true if the platform needs sk_buff unmapping. The compiler
176 * optimizes away unnecessary code if this returns false.
177 */
178static inline int need_skb_unmap(void)
179{
180 /*
181 * This structure is used to tell if the platform needs buffer
182 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
183 */
184 struct dummy {
185 DECLARE_PCI_UNMAP_ADDR(addr);
186 };
187
188 return sizeof(struct dummy) != 0;
189}
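/*
 * Illustrative note (editorial): on platforms that need no unmap step,
 * DECLARE_PCI_UNMAP_ADDR(addr) typically expands to nothing, the dummy
 * struct above has size 0, and every branch guarded by need_skb_unmap()
 * becomes dead code that the compiler removes.
 */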
190
191/**
192 * unmap_skb - unmap a packet main body and its page fragments
193 * @skb: the packet
194 * @q: the Tx queue containing Tx descriptors for the packet
195 * @cidx: index of Tx descriptor
196 * @pdev: the PCI device
197 *
198 * Unmap the main body of an sk_buff and its page fragments, if any.
199 * Because of the fairly complicated structure of our SGLs and the desire
200 * to conserve space for metadata, we keep the information necessary to
201 * unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
202 * in the Tx descriptors (the physical addresses of the various data
203 * buffers). The send functions initialize the state in skb->cb so we
204 * can unmap the buffers held in the first Tx descriptor here, and we
205 * have enough information at this point to update the state for the next
206 * Tx descriptor.
207 */
208static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
209 unsigned int cidx, struct pci_dev *pdev)
210{
211 const struct sg_ent *sgp;
212 struct unmap_info *ui = (struct unmap_info *)skb->cb;
213 int nfrags, frag_idx, curflit, j = ui->addr_idx;
214
215 sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];
216
217 if (ui->len) {
218 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
219 PCI_DMA_TODEVICE);
220 ui->len = 0; /* so we know for next descriptor for this skb */
221 j = 1;
222 }
223
224 frag_idx = ui->fragidx;
225 curflit = ui->sflit + 1 + j;
226 nfrags = skb_shinfo(skb)->nr_frags;
227
228 while (frag_idx < nfrags && curflit < WR_FLITS) {
229 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
230 skb_shinfo(skb)->frags[frag_idx].size,
231 PCI_DMA_TODEVICE);
232 j ^= 1;
233 if (j == 0) {
234 sgp++;
235 curflit++;
236 }
237 curflit++;
238 frag_idx++;
239 }
240
241 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
242 ui->fragidx = frag_idx;
243 ui->addr_idx = j;
244 ui->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
245 }
246}
247
248/**
249 * free_tx_desc - reclaims Tx descriptors and their buffers
250 * @adapter: the adapter
251 * @q: the Tx queue to reclaim descriptors from
252 * @n: the number of descriptors to reclaim
253 *
254 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
255 * Tx buffers. Called with the Tx queue lock held.
256 */
257static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
258 unsigned int n)
259{
260 struct tx_sw_desc *d;
261 struct pci_dev *pdev = adapter->pdev;
262 unsigned int cidx = q->cidx;
263
264 const int need_unmap = need_skb_unmap() &&
265 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
266
267 d = &q->sdesc[cidx];
268 while (n--) {
269 if (d->skb) { /* an SGL is present */
270 if (need_unmap)
271 unmap_skb(d->skb, q, cidx, pdev);
272 if (d->skb->priority == cidx)
273 kfree_skb(d->skb);
274 }
275 ++d;
276 if (++cidx == q->size) {
277 cidx = 0;
278 d = q->sdesc;
279 }
280 }
281 q->cidx = cidx;
282}
283
284/**
285 * reclaim_completed_tx - reclaims completed Tx descriptors
286 * @adapter: the adapter
287 * @q: the Tx queue to reclaim completed descriptors from
288 *
289 * Reclaims Tx descriptors that the SGE has indicated it has processed,
290 * and frees the associated buffers if possible. Called with the Tx
291 * queue's lock held.
292 */
293static inline void reclaim_completed_tx(struct adapter *adapter,
294 struct sge_txq *q)
295{
296 unsigned int reclaim = q->processed - q->cleaned;
297
298 if (reclaim) {
299 free_tx_desc(adapter, q, reclaim);
300 q->cleaned += reclaim;
301 q->in_use -= reclaim;
302 }
303}
304
305/**
306 * should_restart_tx - are there enough resources to restart a Tx queue?
307 * @q: the Tx queue
308 *
309 * Checks if there are enough descriptors to restart a suspended Tx queue.
310 */
311static inline int should_restart_tx(const struct sge_txq *q)
312{
313 unsigned int r = q->processed - q->cleaned;
314
315 return q->in_use - r < (q->size >> 1);
316}
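/*
 * Editorial note: the test above allows a restart once fewer than half of
 * the queue's descriptors would remain in use after reclaiming everything
 * the hardware has already processed.
 */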
317
318/**
319 * free_rx_bufs - free the Rx buffers on an SGE free list
320 * @pdev: the PCI device associated with the adapter
321 * @rxq: the SGE free list to clean up
322 *
323 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
324 * this queue should be stopped before calling this function.
325 */
326static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
327{
328 unsigned int cidx = q->cidx;
329
330 while (q->credits--) {
331 struct rx_sw_desc *d = &q->sdesc[cidx];
332
333 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
334 q->buf_size, PCI_DMA_FROMDEVICE);
335 kfree_skb(d->skb);
336 d->skb = NULL;
337 if (++cidx == q->size)
338 cidx = 0;
339 }
340}
341
342/**
343 * add_one_rx_buf - add a packet buffer to a free-buffer list
344 * @skb: the buffer to add
345 * @len: the buffer length
346 * @d: the HW Rx descriptor to write
347 * @sd: the SW Rx descriptor to write
348 * @gen: the generation bit value
349 * @pdev: the PCI device associated with the adapter
350 *
351 * Add a buffer of the given length to the supplied HW and SW Rx
352 * descriptors.
353 */
354static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
355 struct rx_desc *d, struct rx_sw_desc *sd,
356 unsigned int gen, struct pci_dev *pdev)
357{
358 dma_addr_t mapping;
359
360 sd->skb = skb;
361 mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
362 pci_unmap_addr_set(sd, dma_addr, mapping);
363
364 d->addr_lo = cpu_to_be32(mapping);
365 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
366 wmb();
367 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
368 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
369}
370
371/**
372 * refill_fl - refill an SGE free-buffer list
373 * @adapter: the adapter
374 * @q: the free-list to refill
375 * @n: the number of new buffers to allocate
376 * @gfp: the gfp flags for allocating new buffers
377 *
378 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
379 * allocated with the supplied gfp flags. The caller must ensure that
380 * @n does not exceed the queue's capacity.
381 */
382static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
383{
384 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
385 struct rx_desc *d = &q->desc[q->pidx];
386
387 while (n--) {
388 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
389
390 if (!skb)
391 break;
392
393 add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
394 d++;
395 sd++;
396 if (++q->pidx == q->size) {
397 q->pidx = 0;
398 q->gen ^= 1;
399 sd = q->sdesc;
400 d = q->desc;
401 }
402 q->credits++;
403 }
404
405 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
406}
407
408static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
409{
410 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
411}
412
413/**
414 * recycle_rx_buf - recycle a receive buffer
415 * @adapter: the adapter
416 * @q: the SGE free list
417 * @idx: index of buffer to recycle
418 *
419 * Recycles the specified buffer on the given free list by adding it at
420 * the next available slot on the list.
421 */
422static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
423 unsigned int idx)
424{
425 struct rx_desc *from = &q->desc[idx];
426 struct rx_desc *to = &q->desc[q->pidx];
427
428 q->sdesc[q->pidx] = q->sdesc[idx];
429 to->addr_lo = from->addr_lo; /* already big endian */
430 to->addr_hi = from->addr_hi; /* likewise */
431 wmb();
432 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
433 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
434 q->credits++;
435
436 if (++q->pidx == q->size) {
437 q->pidx = 0;
438 q->gen ^= 1;
439 }
440 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
441}
442
443/**
444 * alloc_ring - allocate resources for an SGE descriptor ring
445 * @pdev: the PCI device
446 * @nelem: the number of descriptors
447 * @elem_size: the size of each descriptor
448 * @sw_size: the size of the SW state associated with each ring element
449 * @phys: the physical address of the allocated ring
450 * @metadata: address of the array holding the SW state for the ring
451 *
452 * Allocates resources for an SGE descriptor ring, such as Tx queues,
453 * free buffer lists, or response queues. Each SGE ring requires
454 * space for its HW descriptors plus, optionally, space for the SW state
455 * associated with each HW entry (the metadata). The function returns
456 * three values: the virtual address for the HW ring (the return value
457 * of the function), the physical address of the HW ring, and the address
458 * of the SW ring.
459 */
460static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
461 size_t sw_size, dma_addr_t *phys, void *metadata)
462{
463 size_t len = nelem * elem_size;
464 void *s = NULL;
465 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
466
467 if (!p)
468 return NULL;
469 if (sw_size) {
470 s = kcalloc(nelem, sw_size, GFP_KERNEL);
471
472 if (!s) {
473 dma_free_coherent(&pdev->dev, len, p, *phys);
474 return NULL;
475 }
476 }
477 if (metadata)
478 *(void **)metadata = s;
479 memset(p, 0, len);
480 return p;
481}
482
483/**
484 * free_qset - free the resources of an SGE queue set
485 * @adapter: the adapter owning the queue set
486 * @q: the queue set
487 *
488 * Release the HW and SW resources associated with an SGE queue set, such
489 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
490 * queue set must be quiesced prior to calling this.
491 */
492void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
493{
494 int i;
495 struct pci_dev *pdev = adapter->pdev;
496
497 if (q->tx_reclaim_timer.function)
498 del_timer_sync(&q->tx_reclaim_timer);
499
500 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
501 if (q->fl[i].desc) {
502 spin_lock(&adapter->sge.reg_lock);
503 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
504 spin_unlock(&adapter->sge.reg_lock);
505 free_rx_bufs(pdev, &q->fl[i]);
506 kfree(q->fl[i].sdesc);
507 dma_free_coherent(&pdev->dev,
508 q->fl[i].size *
509 sizeof(struct rx_desc), q->fl[i].desc,
510 q->fl[i].phys_addr);
511 }
512
513 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
514 if (q->txq[i].desc) {
515 spin_lock(&adapter->sge.reg_lock);
516 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
517 spin_unlock(&adapter->sge.reg_lock);
518 if (q->txq[i].sdesc) {
519 free_tx_desc(adapter, &q->txq[i],
520 q->txq[i].in_use);
521 kfree(q->txq[i].sdesc);
522 }
523 dma_free_coherent(&pdev->dev,
524 q->txq[i].size *
525 sizeof(struct tx_desc),
526 q->txq[i].desc, q->txq[i].phys_addr);
527 __skb_queue_purge(&q->txq[i].sendq);
528 }
529
530 if (q->rspq.desc) {
531 spin_lock(&adapter->sge.reg_lock);
532 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
533 spin_unlock(&adapter->sge.reg_lock);
534 dma_free_coherent(&pdev->dev,
535 q->rspq.size * sizeof(struct rsp_desc),
536 q->rspq.desc, q->rspq.phys_addr);
537 }
538
539 if (q->netdev)
540 q->netdev->atalk_ptr = NULL;
541
542 memset(q, 0, sizeof(*q));
543}
544
545/**
546 * init_qset_cntxt - initialize an SGE queue set context info
547 * @qs: the queue set
548 * @id: the queue set id
549 *
550 * Initializes the TIDs and context ids for the queues of a queue set.
551 */
552static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
553{
554 qs->rspq.cntxt_id = id;
555 qs->fl[0].cntxt_id = 2 * id;
556 qs->fl[1].cntxt_id = 2 * id + 1;
557 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
558 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
559 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
560 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
561 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
562}
563
564/**
565 * sgl_len - calculates the size of an SGL of the given capacity
566 * @n: the number of SGL entries
567 *
568 * Calculates the number of flits needed for a scatter/gather list that
569 * can hold the given number of entries.
570 */
571static inline unsigned int sgl_len(unsigned int n)
572{
573 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
574 return (3 * n) / 2 + (n & 1);
575}
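/*
 * Worked example (illustrative): each struct sg_ent packs two
 * address/length pairs into 3 flits and a trailing odd entry needs 2,
 * so sgl_len(3) = (3 * 3) / 2 + 1 = 5 flits.
 */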
576
577/**
578 * flits_to_desc - returns the num of Tx descriptors for the given flits
579 * @n: the number of flits
580 *
581 * Calculates the number of Tx descriptors needed for the supplied number
582 * of flits.
583 */
584static inline unsigned int flits_to_desc(unsigned int n)
585{
586 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
587 return flit_desc_map[n];
588}
589
590/**
591 * get_packet - return the next ingress packet buffer from a free list
592 * @adap: the adapter that received the packet
593 * @fl: the SGE free list holding the packet
594 * @len: the packet length including any SGE padding
595 * @drop_thres: # of remaining buffers before we start dropping packets
596 *
597 * Get the next packet from a free list and complete setup of the
598 * sk_buff. If the packet is small we make a copy and recycle the
599 * original buffer, otherwise we use the original buffer itself. If a
600 * positive drop threshold is supplied packets are dropped and their
601 * buffers recycled if (a) the number of remaining buffers is under the
602 * threshold and the packet is too big to copy, or (b) the packet should
603 * be copied but there is no memory for the copy.
604 */
605static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
606 unsigned int len, unsigned int drop_thres)
607{
608 struct sk_buff *skb = NULL;
609 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
610
611 prefetch(sd->skb->data);
612
613 if (len <= SGE_RX_COPY_THRES) {
614 skb = alloc_skb(len, GFP_ATOMIC);
615 if (likely(skb != NULL)) {
616 __skb_put(skb, len);
617 pci_dma_sync_single_for_cpu(adap->pdev,
618 pci_unmap_addr(sd,
619 dma_addr),
620 len, PCI_DMA_FROMDEVICE);
621 memcpy(skb->data, sd->skb->data, len);
622 pci_dma_sync_single_for_device(adap->pdev,
623 pci_unmap_addr(sd,
624 dma_addr),
625 len, PCI_DMA_FROMDEVICE);
626 } else if (!drop_thres)
627 goto use_orig_buf;
628 recycle:
629 recycle_rx_buf(adap, fl, fl->cidx);
630 return skb;
631 }
632
633 if (unlikely(fl->credits < drop_thres))
634 goto recycle;
635
636 use_orig_buf:
637 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
638 fl->buf_size, PCI_DMA_FROMDEVICE);
639 skb = sd->skb;
640 skb_put(skb, len);
641 __refill_fl(adap, fl);
642 return skb;
643}
644
645/**
646 * get_imm_packet - return the next ingress packet buffer from a response
647 * @resp: the response descriptor containing the packet data
648 *
649 * Return a packet containing the immediate data of the given response.
650 */
651static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
652{
653 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
654
655 if (skb) {
656 __skb_put(skb, IMMED_PKT_SIZE);
657 memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
658 }
659 return skb;
660}
661
662/**
663 * calc_tx_descs - calculate the number of Tx descriptors for a packet
664 * @skb: the packet
665 *
666 * Returns the number of Tx descriptors needed for the given Ethernet
667 * packet. Ethernet packets require addition of WR and CPL headers.
668 */
669static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
670{
671 unsigned int flits;
672
673 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
674 return 1;
675
676 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
677 if (skb_shinfo(skb)->gso_size)
678 flits++;
679 return flits_to_desc(flits);
680}
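/*
 * Worked example (illustrative): a TSO packet with data in the skb main
 * body plus 2 page fragments needs sgl_len(2 + 1) = 5 SGL flits, 2 flits
 * of WR/CPL header and 1 extra LSO flit, i.e. flits_to_desc(8), which is
 * a single Tx descriptor for either SGE_NUM_GENBITS setting.
 */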
681
682/**
683 * make_sgl - populate a scatter/gather list for a packet
684 * @skb: the packet
685 * @sgp: the SGL to populate
686 * @start: start address of skb main body data to include in the SGL
687 * @len: length of skb main body data to include in the SGL
688 * @pdev: the PCI device
689 *
690 * Generates a scatter/gather list for the buffers that make up a packet
691 * and returns the SGL size in 8-byte words. The caller must size the SGL
692 * appropriately.
693 */
694static inline unsigned int make_sgl(const struct sk_buff *skb,
695 struct sg_ent *sgp, unsigned char *start,
696 unsigned int len, struct pci_dev *pdev)
697{
698 dma_addr_t mapping;
699 unsigned int i, j = 0, nfrags;
700
701 if (len) {
702 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
703 sgp->len[0] = cpu_to_be32(len);
704 sgp->addr[0] = cpu_to_be64(mapping);
705 j = 1;
706 }
707
708 nfrags = skb_shinfo(skb)->nr_frags;
709 for (i = 0; i < nfrags; i++) {
710 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
711
712 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
713 frag->size, PCI_DMA_TODEVICE);
714 sgp->len[j] = cpu_to_be32(frag->size);
715 sgp->addr[j] = cpu_to_be64(mapping);
716 j ^= 1;
717 if (j == 0)
718 ++sgp;
719 }
720 if (j)
721 sgp->len[j] = 0;
722 return ((nfrags + (len != 0)) * 3) / 2 + j;
723}
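/*
 * Editorial note: the value returned above equals
 * sgl_len(nfrags + (len != 0)); e.g. a mapped main body plus two page
 * fragments yields ((2 + 1) * 3) / 2 + 1 = 5 flits, the same as sgl_len(3).
 */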
724
725/**
726 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
727 * @adap: the adapter
728 * @q: the Tx queue
729 *
730 * Ring the doorbell if a Tx queue is asleep. There is a natural race
731 * where the HW may go to sleep just after we check; in that case the
732 * interrupt handler will detect the outstanding Tx packet and ring the
733 * doorbell for us.
734 *
735 * When GTS is disabled we unconditionally ring the doorbell.
736 */
737static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
738{
739#if USE_GTS
740 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
741 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
742 set_bit(TXQ_LAST_PKT_DB, &q->flags);
743 t3_write_reg(adap, A_SG_KDOORBELL,
744 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
745 }
746#else
747 wmb(); /* write descriptors before telling HW */
748 t3_write_reg(adap, A_SG_KDOORBELL,
749 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
750#endif
751}
752
753static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
754{
755#if SGE_NUM_GENBITS == 2
756 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
757#endif
758}
759
760/**
761 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
762 * @ndesc: number of Tx descriptors spanned by the SGL
763 * @skb: the packet corresponding to the WR
764 * @d: first Tx descriptor to be written
765 * @pidx: index of above descriptors
766 * @q: the SGE Tx queue
767 * @sgl: the SGL
768 * @flits: number of flits to the start of the SGL in the first descriptor
769 * @sgl_flits: the SGL size in flits
770 * @gen: the Tx descriptor generation
771 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
772 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
773 *
774 * Write a work request header and an associated SGL. If the SGL is
775 * small enough to fit into one Tx descriptor it has already been written
776 * and we just need to write the WR header. Otherwise we distribute the
777 * SGL across the number of descriptors it spans.
778 */
779static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
780 struct tx_desc *d, unsigned int pidx,
781 const struct sge_txq *q,
782 const struct sg_ent *sgl,
783 unsigned int flits, unsigned int sgl_flits,
784 unsigned int gen, unsigned int wr_hi,
785 unsigned int wr_lo)
786{
787 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
788 struct tx_sw_desc *sd = &q->sdesc[pidx];
789
790 sd->skb = skb;
791 if (need_skb_unmap()) {
792 struct unmap_info *ui = (struct unmap_info *)skb->cb;
793
794 ui->fragidx = 0;
795 ui->addr_idx = 0;
796 ui->sflit = flits;
797 }
798
799 if (likely(ndesc == 1)) {
800 skb->priority = pidx;
801 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
802 V_WR_SGLSFLT(flits)) | wr_hi;
803 wmb();
804 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
805 V_WR_GEN(gen)) | wr_lo;
806 wr_gen2(d, gen);
807 } else {
808 unsigned int ogen = gen;
809 const u64 *fp = (const u64 *)sgl;
810 struct work_request_hdr *wp = wrp;
811
812 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
813 V_WR_SGLSFLT(flits)) | wr_hi;
814
815 while (sgl_flits) {
816 unsigned int avail = WR_FLITS - flits;
817
818 if (avail > sgl_flits)
819 avail = sgl_flits;
820 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
821 sgl_flits -= avail;
822 ndesc--;
823 if (!sgl_flits)
824 break;
825
826 fp += avail;
827 d++;
828 sd++;
829 if (++pidx == q->size) {
830 pidx = 0;
831 gen ^= 1;
832 d = q->desc;
833 sd = q->sdesc;
834 }
835
836 sd->skb = skb;
837 wrp = (struct work_request_hdr *)d;
838 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
839 V_WR_SGLSFLT(1)) | wr_hi;
840 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
841 sgl_flits + 1)) |
842 V_WR_GEN(gen)) | wr_lo;
843 wr_gen2(d, gen);
844 flits = 1;
845 }
846 skb->priority = pidx;
847 wrp->wr_hi |= htonl(F_WR_EOP);
848 wmb();
849 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
850 wr_gen2((struct tx_desc *)wp, ogen);
851 WARN_ON(ndesc != 0);
852 }
853}
854
855/**
856 * write_tx_pkt_wr - write a TX_PKT work request
857 * @adap: the adapter
858 * @skb: the packet to send
859 * @pi: the egress interface
860 * @pidx: index of the first Tx descriptor to write
861 * @gen: the generation value to use
862 * @q: the Tx queue
863 * @ndesc: number of descriptors the packet will occupy
864 * @compl: the value of the COMPL bit to use
865 *
866 * Generate a TX_PKT work request to send the supplied packet.
867 */
868static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
869 const struct port_info *pi,
870 unsigned int pidx, unsigned int gen,
871 struct sge_txq *q, unsigned int ndesc,
872 unsigned int compl)
873{
874 unsigned int flits, sgl_flits, cntrl, tso_info;
875 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
876 struct tx_desc *d = &q->desc[pidx];
877 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
878
879 cpl->len = htonl(skb->len | 0x80000000);
880 cntrl = V_TXPKT_INTF(pi->port_id);
881
882 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
883 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
884
885 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
886 if (tso_info) {
887 int eth_type;
888 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
889
890 d->flit[2] = 0;
891 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
892 hdr->cntrl = htonl(cntrl);
893 eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
894 CPL_ETH_II : CPL_ETH_II_VLAN;
895 tso_info |= V_LSO_ETH_TYPE(eth_type) |
896 V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) |
897 V_LSO_TCPHDR_WORDS(skb->h.th->doff);
898 hdr->lso_info = htonl(tso_info);
899 flits = 3;
900 } else {
901 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
902 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
903 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
904 cpl->cntrl = htonl(cntrl);
905
906 if (skb->len <= WR_LEN - sizeof(*cpl)) {
907 q->sdesc[pidx].skb = NULL;
908 if (!skb->data_len)
909 memcpy(&d->flit[2], skb->data, skb->len);
910 else
911 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
912
913 flits = (skb->len + 7) / 8 + 2;
914 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
915 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
916 | F_WR_SOP | F_WR_EOP | compl);
917 wmb();
918 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
919 V_WR_TID(q->token));
920 wr_gen2(d, gen);
921 kfree_skb(skb);
922 return;
923 }
924
925 flits = 2;
926 }
927
928 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
929 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
930 if (need_skb_unmap())
931 ((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
932
933 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
934 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
935 htonl(V_WR_TID(q->token)));
936}
937
938/**
939 * eth_xmit - add a packet to the Ethernet Tx queue
940 * @skb: the packet
941 * @dev: the egress net device
942 *
943 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
944 */
945int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
946{
947 unsigned int ndesc, pidx, credits, gen, compl;
948 const struct port_info *pi = netdev_priv(dev);
949 struct adapter *adap = dev->priv;
950 struct sge_qset *qs = dev2qset(dev);
951 struct sge_txq *q = &qs->txq[TXQ_ETH];
952
953 /*
954 * The chip's minimum packet length is 9 octets, but play it safe and reject
955 * anything shorter than an Ethernet header.
956 */
957 if (unlikely(skb->len < ETH_HLEN)) {
958 dev_kfree_skb(skb);
959 return NETDEV_TX_OK;
960 }
961
962 spin_lock(&q->lock);
963 reclaim_completed_tx(adap, q);
964
965 credits = q->size - q->in_use;
966 ndesc = calc_tx_descs(skb);
967
968 if (unlikely(credits < ndesc)) {
969 if (!netif_queue_stopped(dev)) {
970 netif_stop_queue(dev);
971 set_bit(TXQ_ETH, &qs->txq_stopped);
972 q->stops++;
973 dev_err(&adap->pdev->dev,
974 "%s: Tx ring %u full while queue awake!\n",
975 dev->name, q->cntxt_id & 7);
976 }
977 spin_unlock(&q->lock);
978 return NETDEV_TX_BUSY;
979 }
980
981 q->in_use += ndesc;
982 if (unlikely(credits - ndesc < q->stop_thres)) {
983 q->stops++;
984 netif_stop_queue(dev);
985 set_bit(TXQ_ETH, &qs->txq_stopped);
986#if !USE_GTS
987 if (should_restart_tx(q) &&
988 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
989 q->restarts++;
990 netif_wake_queue(dev);
991 }
992#endif
993 }
994
995 gen = q->gen;
996 q->unacked += ndesc;
997 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
998 q->unacked &= 7;
999 pidx = q->pidx;
1000 q->pidx += ndesc;
1001 if (q->pidx >= q->size) {
1002 q->pidx -= q->size;
1003 q->gen ^= 1;
1004 }
1005
1006 /* update port statistics */
1007 if (skb->ip_summed == CHECKSUM_COMPLETE)
1008 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1009 if (skb_shinfo(skb)->gso_size)
1010 qs->port_stats[SGE_PSTAT_TSO]++;
1011 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1012 qs->port_stats[SGE_PSTAT_VLANINS]++;
1013
1014 dev->trans_start = jiffies;
1015 spin_unlock(&q->lock);
1016
1017 /*
1018 * We do not use Tx completion interrupts to free DMAd Tx packets.
1019 * This is good for performance but means that we rely on new Tx
1020 * packets arriving to run the destructors of completed packets,
1021 * which open up space in their sockets' send queues. Sometimes
1022 * we do not get such new packets causing Tx to stall. A single
1023 * UDP transmitter is a good example of this situation. We have
1024 * a clean up timer that periodically reclaims completed packets
1025 * but it doesn't run often enough (nor do we want it to) to prevent
1026 * lengthy stalls. A solution to this problem is to run the
1027 * destructor early, after the packet is queued but before it's DMAd.
1028 * A drawback is that we lie to socket memory accounting, but the amount
1029 * of extra memory is reasonable (limited by the number of Tx
1030 * descriptors), the packets do actually get freed quickly by new
1031 * packets almost always, and for protocols like TCP that wait for
1032 * acks to really free up the data the extra memory is even less.
1033 * On the positive side we run the destructors on the sending CPU
1034 * rather than on a potentially different completing CPU, usually a
1035 * good thing. We also run them without holding our Tx queue lock,
1036 * unlike what reclaim_completed_tx() would otherwise do.
1037 *
1038 * Run the destructor before telling the DMA engine about the packet
1039 * to make sure it doesn't complete and get freed prematurely.
1040 */
1041 if (likely(!skb_shared(skb)))
1042 skb_orphan(skb);
1043
1044 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1045 check_ring_tx_db(adap, q);
1046 return NETDEV_TX_OK;
1047}
1048
1049/**
1050 * write_imm - write a packet into a Tx descriptor as immediate data
1051 * @d: the Tx descriptor to write
1052 * @skb: the packet
1053 * @len: the length of packet data to write as immediate data
1054 * @gen: the generation bit value to write
1055 *
1056 * Writes a packet as immediate data into a Tx descriptor. The packet
1057 * contains a work request at its beginning. We must write the packet
1058 * carefully so the SGE doesn't accidentally read it before it has been
1059 * written in its entirety.
1060 */
1061static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1062 unsigned int len, unsigned int gen)
1063{
1064 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1065 struct work_request_hdr *to = (struct work_request_hdr *)d;
1066
1067 memcpy(&to[1], &from[1], len - sizeof(*from));
1068 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1069 V_WR_BCNTLFLT(len & 7));
1070 wmb();
1071 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1072 V_WR_LEN((len + 7) / 8));
1073 wr_gen2(d, gen);
1074 kfree_skb(skb);
1075}
1076
1077/**
1078 * check_desc_avail - check descriptor availability on a send queue
1079 * @adap: the adapter
1080 * @q: the send queue
1081 * @skb: the packet needing the descriptors
1082 * @ndesc: the number of Tx descriptors needed
1083 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1084 *
1085 * Checks if the requested number of Tx descriptors is available on an
1086 * SGE send queue. If the queue is already suspended or not enough
1087 * descriptors are available the packet is queued for later transmission.
1088 * Must be called with the Tx queue locked.
1089 *
1090 * Returns 0 if enough descriptors are available, 1 if there aren't
1091 * enough descriptors and the packet has been queued, and 2 if the caller
1092 * needs to retry because there weren't enough descriptors at the
1093 * beginning of the call but some freed up in the meantime.
1094 */
1095static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1096 struct sk_buff *skb, unsigned int ndesc,
1097 unsigned int qid)
1098{
1099 if (unlikely(!skb_queue_empty(&q->sendq))) {
1100 addq_exit:__skb_queue_tail(&q->sendq, skb);
1101 return 1;
1102 }
1103 if (unlikely(q->size - q->in_use < ndesc)) {
1104 struct sge_qset *qs = txq_to_qset(q, qid);
1105
1106 set_bit(qid, &qs->txq_stopped);
1107 smp_mb__after_clear_bit();
1108
1109 if (should_restart_tx(q) &&
1110 test_and_clear_bit(qid, &qs->txq_stopped))
1111 return 2;
1112
1113 q->stops++;
1114 goto addq_exit;
1115 }
1116 return 0;
1117}
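/*
 * Typical caller pattern (editorial sketch; see ctrl_xmit() and ofld_xmit()
 * below): with q->lock held, reclaim completed descriptors and call
 * check_desc_avail().  On 2, jump back and reclaim again; on 1, drop the
 * lock and return NET_XMIT_CN, as the packet now sits on q->sendq; on 0,
 * go ahead and write the work request.
 */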
1118
1119/**
1120 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1121 * @q: the SGE control Tx queue
1122 *
1123 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1124 * that send only immediate data (presently just the control queues) and
1125 * thus do not have any sk_buffs to release.
1126 */
1127static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1128{
1129 unsigned int reclaim = q->processed - q->cleaned;
1130
1131 q->in_use -= reclaim;
1132 q->cleaned += reclaim;
1133}
1134
1135static inline int immediate(const struct sk_buff *skb)
1136{
1137 return skb->len <= WR_LEN && !skb->data_len;
1138}
1139
1140/**
1141 * ctrl_xmit - send a packet through an SGE control Tx queue
1142 * @adap: the adapter
1143 * @q: the control queue
1144 * @skb: the packet
1145 *
1146 * Send a packet through an SGE control Tx queue. Packets sent through
1147 * a control queue must fit entirely as immediate data in a single Tx
1148 * descriptor and have no page fragments.
1149 */
1150static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1151 struct sk_buff *skb)
1152{
1153 int ret;
1154 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1155
1156 if (unlikely(!immediate(skb))) {
1157 WARN_ON(1);
1158 dev_kfree_skb(skb);
1159 return NET_XMIT_SUCCESS;
1160 }
1161
1162 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1163 wrp->wr_lo = htonl(V_WR_TID(q->token));
1164
1165 spin_lock(&q->lock);
1166 again:reclaim_completed_tx_imm(q);
1167
1168 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1169 if (unlikely(ret)) {
1170 if (ret == 1) {
1171 spin_unlock(&q->lock);
1172 return NET_XMIT_CN;
1173 }
1174 goto again;
1175 }
1176
1177 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1178
1179 q->in_use++;
1180 if (++q->pidx >= q->size) {
1181 q->pidx = 0;
1182 q->gen ^= 1;
1183 }
1184 spin_unlock(&q->lock);
1185 wmb();
1186 t3_write_reg(adap, A_SG_KDOORBELL,
1187 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1188 return NET_XMIT_SUCCESS;
1189}
1190
1191/**
1192 * restart_ctrlq - restart a suspended control queue
1193 * @qs: the queue set containing the control queue
1194 *
1195 * Resumes transmission on a suspended Tx control queue.
1196 */
1197static void restart_ctrlq(unsigned long data)
1198{
1199 struct sk_buff *skb;
1200 struct sge_qset *qs = (struct sge_qset *)data;
1201 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1202 struct adapter *adap = qs->netdev->priv;
1203
1204 spin_lock(&q->lock);
1205 again:reclaim_completed_tx_imm(q);
1206
1207 while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
1208
1209 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1210
1211 if (++q->pidx >= q->size) {
1212 q->pidx = 0;
1213 q->gen ^= 1;
1214 }
1215 q->in_use++;
1216 }
1217
1218 if (!skb_queue_empty(&q->sendq)) {
1219 set_bit(TXQ_CTRL, &qs->txq_stopped);
1220 smp_mb__after_clear_bit();
1221
1222 if (should_restart_tx(q) &&
1223 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1224 goto again;
1225 q->stops++;
1226 }
1227
1228 spin_unlock(&q->lock);
1229 t3_write_reg(adap, A_SG_KDOORBELL,
1230 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1231}
1232
1233/*
1234 * Send a management message through control queue 0
1235 */
1236int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1237{
1238 return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1239}
1240
1241/**
1242 * deferred_unmap_destructor - unmap a packet when it is freed
1243 * @skb: the packet
1244 *
1245 * This is the packet destructor used for Tx packets that need to remain
1246 * mapped until they are freed rather than until their Tx descriptors are
1247 * freed.
1248 */
1249static void deferred_unmap_destructor(struct sk_buff *skb)
1250{
1251 int i;
1252 const dma_addr_t *p;
1253 const struct skb_shared_info *si;
1254 const struct deferred_unmap_info *dui;
1255 const struct unmap_info *ui = (struct unmap_info *)skb->cb;
1256
1257 dui = (struct deferred_unmap_info *)skb->head;
1258 p = dui->addr;
1259
1260 if (ui->len)
1261 pci_unmap_single(dui->pdev, *p++, ui->len, PCI_DMA_TODEVICE);
1262
1263 si = skb_shinfo(skb);
1264 for (i = 0; i < si->nr_frags; i++)
1265 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1266 PCI_DMA_TODEVICE);
1267}
1268
1269static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1270 const struct sg_ent *sgl, int sgl_flits)
1271{
1272 dma_addr_t *p;
1273 struct deferred_unmap_info *dui;
1274
1275 dui = (struct deferred_unmap_info *)skb->head;
1276 dui->pdev = pdev;
1277 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1278 *p++ = be64_to_cpu(sgl->addr[0]);
1279 *p++ = be64_to_cpu(sgl->addr[1]);
1280 }
1281 if (sgl_flits)
1282 *p = be64_to_cpu(sgl->addr[0]);
1283}
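/*
 * Editorial sketch (not from this file): because deferred_unmap_info is
 * written at skb->head, an offload sender that wants deferred unmapping
 * must leave room for it ahead of the work request, e.g. something like
 *
 *	skb = alloc_skb(sizeof(struct deferred_unmap_info) + wr_len, gfp);
 *	if (skb)
 *		skb_reserve(skb, sizeof(struct deferred_unmap_info));
 *
 * where wr_len stands for the hypothetical length of the WR and headers.
 */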
1284
1285/**
1286 * write_ofld_wr - write an offload work request
1287 * @adap: the adapter
1288 * @skb: the packet to send
1289 * @q: the Tx queue
1290 * @pidx: index of the first Tx descriptor to write
1291 * @gen: the generation value to use
1292 * @ndesc: number of descriptors the packet will occupy
1293 *
1294 * Write an offload work request to send the supplied packet. The packet
1295 * data already carry the work request with most fields populated.
1296 */
1297static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1298 struct sge_txq *q, unsigned int pidx,
1299 unsigned int gen, unsigned int ndesc)
1300{
1301 unsigned int sgl_flits, flits;
1302 struct work_request_hdr *from;
1303 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1304 struct tx_desc *d = &q->desc[pidx];
1305
1306 if (immediate(skb)) {
1307 q->sdesc[pidx].skb = NULL;
1308 write_imm(d, skb, skb->len, gen);
1309 return;
1310 }
1311
1312 /* Only TX_DATA builds SGLs */
1313
1314 from = (struct work_request_hdr *)skb->data;
1315 memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));
1316
1317 flits = (skb->h.raw - skb->data) / 8;
1318 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1319 sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
1320 adap->pdev);
1321 if (need_skb_unmap()) {
1322 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1323 skb->destructor = deferred_unmap_destructor;
1324 ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
1325 }
1326
1327 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1328 gen, from->wr_hi, from->wr_lo);
1329}
1330
1331/**
1332 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1333 * @skb: the packet
1334 *
1335 * Returns the number of Tx descriptors needed for the given offload
1336 * packet. These packets are already fully constructed.
1337 */
1338static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1339{
1340 unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;
1341
1342 if (skb->len <= WR_LEN && cnt == 0)
1343 return 1; /* packet fits as immediate data */
1344
1345 flits = (skb->h.raw - skb->data) / 8; /* headers */
1346 if (skb->tail != skb->h.raw)
1347 cnt++;
1348 return flits_to_desc(flits + sgl_len(cnt));
1349}
1350
1351/**
1352 * ofld_xmit - send a packet through an offload queue
1353 * @adap: the adapter
1354 * @q: the Tx offload queue
1355 * @skb: the packet
1356 *
1357 * Send an offload packet through an SGE offload queue.
1358 */
1359static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1360 struct sk_buff *skb)
1361{
1362 int ret;
1363 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1364
1365 spin_lock(&q->lock);
1366 again:reclaim_completed_tx(adap, q);
1367
1368 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1369 if (unlikely(ret)) {
1370 if (ret == 1) {
1371 skb->priority = ndesc; /* save for restart */
1372 spin_unlock(&q->lock);
1373 return NET_XMIT_CN;
1374 }
1375 goto again;
1376 }
1377
1378 gen = q->gen;
1379 q->in_use += ndesc;
1380 pidx = q->pidx;
1381 q->pidx += ndesc;
1382 if (q->pidx >= q->size) {
1383 q->pidx -= q->size;
1384 q->gen ^= 1;
1385 }
1386 spin_unlock(&q->lock);
1387
1388 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1389 check_ring_tx_db(adap, q);
1390 return NET_XMIT_SUCCESS;
1391}
1392
1393/**
1394 * restart_offloadq - restart a suspended offload queue
1395 * @qs: the queue set containing the offload queue
1396 *
1397 * Resumes transmission on a suspended Tx offload queue.
1398 */
1399static void restart_offloadq(unsigned long data)
1400{
1401 struct sk_buff *skb;
1402 struct sge_qset *qs = (struct sge_qset *)data;
1403 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1404 struct adapter *adap = qs->netdev->priv;
1405
1406 spin_lock(&q->lock);
1407 again:reclaim_completed_tx(adap, q);
1408
1409 while ((skb = skb_peek(&q->sendq)) != NULL) {
1410 unsigned int gen, pidx;
1411 unsigned int ndesc = skb->priority;
1412
1413 if (unlikely(q->size - q->in_use < ndesc)) {
1414 set_bit(TXQ_OFLD, &qs->txq_stopped);
1415 smp_mb__after_clear_bit();
1416
1417 if (should_restart_tx(q) &&
1418 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1419 goto again;
1420 q->stops++;
1421 break;
1422 }
1423
1424 gen = q->gen;
1425 q->in_use += ndesc;
1426 pidx = q->pidx;
1427 q->pidx += ndesc;
1428 if (q->pidx >= q->size) {
1429 q->pidx -= q->size;
1430 q->gen ^= 1;
1431 }
1432 __skb_unlink(skb, &q->sendq);
1433 spin_unlock(&q->lock);
1434
1435 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1436 spin_lock(&q->lock);
1437 }
1438 spin_unlock(&q->lock);
1439
1440#if USE_GTS
1441 set_bit(TXQ_RUNNING, &q->flags);
1442 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1443#endif
1444 t3_write_reg(adap, A_SG_KDOORBELL,
1445 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1446}
1447
1448/**
1449 * queue_set - return the queue set a packet should use
1450 * @skb: the packet
1451 *
1452 * Maps a packet to the SGE queue set it should use. The desired queue
1453 * set is carried in bits 1-3 in the packet's priority.
1454 */
1455static inline int queue_set(const struct sk_buff *skb)
1456{
1457 return skb->priority >> 1;
1458}
1459
1460/**
1461 * is_ctrl_pkt - return whether an offload packet is a control packet
1462 * @skb: the packet
1463 *
1464 * Determines whether an offload packet should use an OFLD or a CTRL
1465 * Tx queue. This is indicated by bit 0 in the packet's priority.
1466 */
1467static inline int is_ctrl_pkt(const struct sk_buff *skb)
1468{
1469 return skb->priority & 1;
1470}
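/*
 * Illustrative encoding (editorial): an offload caller that wants queue
 * set 2 and the control queue would set
 *
 *	skb->priority = (2 << 1) | 1;
 *
 * before handing the packet to t3_offload_tx() below.
 */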
1471
1472/**
1473 * t3_offload_tx - send an offload packet
1474 * @tdev: the offload device to send to
1475 * @skb: the packet
1476 *
1477 * Sends an offload packet. We use the packet priority to select the
1478 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1479 * should be sent as regular or control, bits 1-3 select the queue set.
1480 */
1481int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1482{
1483 struct adapter *adap = tdev2adap(tdev);
1484 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1485
1486 if (unlikely(is_ctrl_pkt(skb)))
1487 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1488
1489 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1490}
1491
1492/**
1493 * offload_enqueue - add an offload packet to an SGE offload receive queue
1494 * @q: the SGE response queue
1495 * @skb: the packet
1496 *
1497 * Add a new offload packet to an SGE response queue's offload packet
1498 * queue. If the packet is the first on the queue it schedules the RX
1499 * softirq to process the queue.
1500 */
1501static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1502{
1503 skb->next = skb->prev = NULL;
1504 if (q->rx_tail)
1505 q->rx_tail->next = skb;
1506 else {
1507 struct sge_qset *qs = rspq_to_qset(q);
1508
1509 if (__netif_rx_schedule_prep(qs->netdev))
1510 __netif_rx_schedule(qs->netdev);
1511 q->rx_head = skb;
1512 }
1513 q->rx_tail = skb;
1514}
1515
1516/**
1517 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1518 * @tdev: the offload device that will be receiving the packets
1519 * @q: the SGE response queue that assembled the bundle
1520 * @skbs: the partial bundle
1521 * @n: the number of packets in the bundle
1522 *
1523 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1524 */
1525static inline void deliver_partial_bundle(struct t3cdev *tdev,
1526 struct sge_rspq *q,
1527 struct sk_buff *skbs[], int n)
1528{
1529 if (n) {
1530 q->offload_bundles++;
1531 tdev->recv(tdev, skbs, n);
1532 }
1533}
1534
1535/**
1536 * ofld_poll - NAPI handler for offload packets in interrupt mode
1537 * @dev: the network device doing the polling
1538 * @budget: polling budget
1539 *
1540 * The NAPI handler for offload packets when a response queue is serviced
1541 * by the hard interrupt handler, i.e., when it's operating in non-polling
1542 * mode. Creates small packet batches and sends them through the offload
1543 * receive handler. Batches need to be of modest size as we do prefetches
1544 * on the packets in each.
1545 */
1546static int ofld_poll(struct net_device *dev, int *budget)
1547{
1548 struct adapter *adapter = dev->priv;
1549 struct sge_qset *qs = dev2qset(dev);
1550 struct sge_rspq *q = &qs->rspq;
1551 int work_done, limit = min(*budget, dev->quota), avail = limit;
1552
1553 while (avail) {
1554 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1555 int ngathered;
1556
1557 spin_lock_irq(&q->lock);
1558 head = q->rx_head;
1559 if (!head) {
1560 work_done = limit - avail;
1561 *budget -= work_done;
1562 dev->quota -= work_done;
1563 __netif_rx_complete(dev);
1564 spin_unlock_irq(&q->lock);
1565 return 0;
1566 }
1567
1568 tail = q->rx_tail;
1569 q->rx_head = q->rx_tail = NULL;
1570 spin_unlock_irq(&q->lock);
1571
1572 for (ngathered = 0; avail && head; avail--) {
1573 prefetch(head->data);
1574 skbs[ngathered] = head;
1575 head = head->next;
1576 skbs[ngathered]->next = NULL;
1577 if (++ngathered == RX_BUNDLE_SIZE) {
1578 q->offload_bundles++;
1579 adapter->tdev.recv(&adapter->tdev, skbs,
1580 ngathered);
1581 ngathered = 0;
1582 }
1583 }
1584 if (head) { /* splice remaining packets back onto Rx queue */
1585 spin_lock_irq(&q->lock);
1586 tail->next = q->rx_head;
1587 if (!q->rx_head)
1588 q->rx_tail = tail;
1589 q->rx_head = head;
1590 spin_unlock_irq(&q->lock);
1591 }
1592 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1593 }
1594 work_done = limit - avail;
1595 *budget -= work_done;
1596 dev->quota -= work_done;
1597 return 1;
1598}
1599
1600/**
1601 * rx_offload - process a received offload packet
1602 * @tdev: the offload device receiving the packet
1603 * @rq: the response queue that received the packet
1604 * @skb: the packet
1605 * @rx_gather: a gather list of packets if we are building a bundle
1606 * @gather_idx: index of the next available slot in the bundle
1607 *
1608 * Process an ingress offload packet and add it to the offload ingress
1609 * queue. Returns the index of the next available slot in the bundle.
1610 */
1611static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1612 struct sk_buff *skb, struct sk_buff *rx_gather[],
1613 unsigned int gather_idx)
1614{
1615 rq->offload_pkts++;
1616 skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data;
1617
1618 if (rq->polling) {
1619 rx_gather[gather_idx++] = skb;
1620 if (gather_idx == RX_BUNDLE_SIZE) {
1621 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1622 gather_idx = 0;
1623 rq->offload_bundles++;
1624 }
1625 } else
1626 offload_enqueue(rq, skb);
1627
1628 return gather_idx;
1629}
1630
1631/**
1632 * restart_tx - check whether to restart suspended Tx queues
1633 * @qs: the queue set to resume
1634 *
1635 * Restarts suspended Tx queues of an SGE queue set if they have enough
1636 * free resources to resume operation.
1637 */
1638static void restart_tx(struct sge_qset *qs)
1639{
1640 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1641 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1642 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1643 qs->txq[TXQ_ETH].restarts++;
1644 if (netif_running(qs->netdev))
1645 netif_wake_queue(qs->netdev);
1646 }
1647
1648 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1649 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1650 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1651 qs->txq[TXQ_OFLD].restarts++;
1652 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1653 }
1654 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1655 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1656 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1657 qs->txq[TXQ_CTRL].restarts++;
1658 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1659 }
1660}
1661
1662/**
1663 * rx_eth - process an ingress ethernet packet
1664 * @adap: the adapter
1665 * @rq: the response queue that received the packet
1666 * @skb: the packet
1667 * @pad: amount of padding at the start of the buffer
1668 *
1669 * Process an ingress Ethernet packet and deliver it to the stack.
1670 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1671 * if it was immediate data in a response.
1672 */
1673static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1674 struct sk_buff *skb, int pad)
1675{
1676 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1677 struct port_info *pi;
1678
1679 rq->eth_pkts++;
1680 skb_pull(skb, sizeof(*p) + pad);
1681 skb->dev = adap->port[p->iff];
1682 skb->dev->last_rx = jiffies;
1683 skb->protocol = eth_type_trans(skb, skb->dev);
1684 pi = netdev_priv(skb->dev);
1685 if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
1686 !p->fragment) {
1687 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1688 skb->ip_summed = CHECKSUM_UNNECESSARY;
1689 } else
1690 skb->ip_summed = CHECKSUM_NONE;
1691
1692 if (unlikely(p->vlan_valid)) {
1693 struct vlan_group *grp = pi->vlan_grp;
1694
1695 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1696 if (likely(grp))
1697 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1698 rq->polling);
1699 else
1700 dev_kfree_skb_any(skb);
1701 } else if (rq->polling)
1702 netif_receive_skb(skb);
1703 else
1704 netif_rx(skb);
1705}
1706
1707/**
1708 * handle_rsp_cntrl_info - handles control information in a response
1709 * @qs: the queue set corresponding to the response
1710 * @flags: the response control flags
1711 *
1712 * Handles the control information of an SGE response, such as GTS
1713 * indications and completion credits for the queue set's Tx queues.
1714 * HW coalesces credits; we don't do any extra SW coalescing.
1715 */
1716static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
1717{
1718 unsigned int credits;
1719
1720#if USE_GTS
1721 if (flags & F_RSPD_TXQ0_GTS)
1722 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1723#endif
1724
1725 credits = G_RSPD_TXQ0_CR(flags);
1726 if (credits)
1727 qs->txq[TXQ_ETH].processed += credits;
1728
1729 credits = G_RSPD_TXQ2_CR(flags);
1730 if (credits)
1731 qs->txq[TXQ_CTRL].processed += credits;
1732
1733# if USE_GTS
1734 if (flags & F_RSPD_TXQ1_GTS)
1735 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1736# endif
1737 credits = G_RSPD_TXQ1_CR(flags);
1738 if (credits)
1739 qs->txq[TXQ_OFLD].processed += credits;
1740}
1741
1742/**
1743 * check_ring_db - check if we need to ring any doorbells
1744 * @adapter: the adapter
1745 * @qs: the queue set whose Tx queues are to be examined
1746 * @sleeping: indicates which Tx queue sent GTS
1747 *
1748 * Checks if some of a queue set's Tx queues need to ring their doorbells
1749 * to resume transmission after idling while they still have unprocessed
1750 * descriptors.
1751 */
1752static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1753 unsigned int sleeping)
1754{
1755 if (sleeping & F_RSPD_TXQ0_GTS) {
1756 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1757
1758 if (txq->cleaned + txq->in_use != txq->processed &&
1759 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1760 set_bit(TXQ_RUNNING, &txq->flags);
1761 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1762 V_EGRCNTX(txq->cntxt_id));
1763 }
1764 }
1765
1766 if (sleeping & F_RSPD_TXQ1_GTS) {
1767 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1768
1769 if (txq->cleaned + txq->in_use != txq->processed &&
1770 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1771 set_bit(TXQ_RUNNING, &txq->flags);
1772 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1773 V_EGRCNTX(txq->cntxt_id));
1774 }
1775 }
1776}
1777
1778/**
1779 * is_new_response - check if a response is newly written
1780 * @r: the response descriptor
1781 * @q: the response queue
1782 *
1783 * Returns true if a response descriptor contains a yet unprocessed
1784 * response.
1785 */
1786static inline int is_new_response(const struct rsp_desc *r,
1787 const struct sge_rspq *q)
1788{
1789 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1790}
1791
1792#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1793#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1794 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1795 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1796 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1797
1798/* How long to delay the next interrupt in case of memory shortage, in units of 0.1 us. */
1799#define NOMEM_INTR_DELAY 2500
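/* 2500 * 0.1us = 250us; applied as q->next_holdoff when skb allocation fails */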
1800
1801/**
1802 * process_responses - process responses from an SGE response queue
1803 * @adap: the adapter
1804 * @qs: the queue set to which the response queue belongs
1805 * @budget: how many responses can be processed in this round
1806 *
1807 * Process responses from an SGE response queue up to the supplied budget.
1808 * Responses include received packets as well as credits and other events
1809 * for the queues that belong to the response queue's queue set.
1810 * A negative budget is effectively unlimited.
1811 *
1812 * Additionally choose the interrupt holdoff time for the next interrupt
1813 * on this queue. If the system is under memory shortage use a fairly
1814 * long delay to help recovery.
1815 */
1816static int process_responses(struct adapter *adap, struct sge_qset *qs,
1817 int budget)
1818{
1819 struct sge_rspq *q = &qs->rspq;
1820 struct rsp_desc *r = &q->desc[q->cidx];
1821 int budget_left = budget;
6195c71d 1822 unsigned int sleeping = 0;
1823 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1824 int ngathered = 0;
1825
1826 q->next_holdoff = q->holdoff_tmr;
1827
1828 while (likely(budget_left && is_new_response(r, q))) {
1829 int eth, ethpad = 0;
1830 struct sk_buff *skb = NULL;
1831 u32 len, flags = ntohl(r->flags);
1832 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
1833
1834 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1835
1836 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1837 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1838 if (!skb)
1839 goto no_mem;
1840
1841 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
1842 skb->data[0] = CPL_ASYNC_NOTIF;
1843 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
1844 q->async_notif++;
1845 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1846 skb = get_imm_packet(r);
1847 if (unlikely(!skb)) {
1848 no_mem:
1849 q->next_holdoff = NOMEM_INTR_DELAY;
1850 q->nomem++;
1851 /* consume one credit since we tried */
1852 budget_left--;
1853 break;
1854 }
1855 q->imm_data++;
1856 } else if ((len = ntohl(r->len_cq)) != 0) {
1857 struct sge_fl *fl;
1858
1859 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1860 fl->credits--;
1861 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1862 eth ? SGE_RX_DROP_THRES : 0);
1863 if (!skb)
1864 q->rx_drops++;
1865 else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
1866 __skb_pull(skb, 2);
1867 ethpad = 2;
1868 if (++fl->cidx == fl->size)
1869 fl->cidx = 0;
1870 } else
1871 q->pure_rsps++;
1872
1873 if (flags & RSPD_CTRL_MASK) {
1874 sleeping |= flags & RSPD_GTS_MASK;
6195c71d 1875 handle_rsp_cntrl_info(qs, flags);
1876 }
1877
1878 r++;
1879 if (unlikely(++q->cidx == q->size)) {
1880 q->cidx = 0;
1881 q->gen ^= 1;
1882 r = q->desc;
1883 }
1884 prefetch(r);
1885
1886 if (++q->credits >= (q->size / 4)) {
1887 refill_rspq(adap, q, q->credits);
1888 q->credits = 0;
1889 }
1890
1891 if (likely(skb != NULL)) {
1892 if (eth)
1893 rx_eth(adap, q, skb, ethpad);
1894 else {
1895 /* Preserve the RSS info in csum & priority */
1896 skb->csum = rss_hi;
1897 skb->priority = rss_lo;
1898 ngathered = rx_offload(&adap->tdev, q, skb,
1899 offload_skbs, ngathered);
1900 }
1901 }
1902
1903 --budget_left;
1904 }
1905
1906 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
1907 if (sleeping)
1908 check_ring_db(adap, qs, sleeping);
1909
1910 smp_mb(); /* commit Tx queue .processed updates */
1911 if (unlikely(qs->txq_stopped != 0))
1912 restart_tx(qs);
1913
1914 budget -= budget_left;
1915 return budget;
1916}
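/*
 * The value returned above is budget - budget_left, i.e. the number of
 * responses consumed this round.  napi_rx_handler() compares it against its
 * effective budget, while the hard-interrupt paths call this with a budget
 * of -1 and treat a return of 0 as an unhandled interrupt.
 */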
1917
1918static inline int is_pure_response(const struct rsp_desc *r)
1919{
1920 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
1921
1922 return (n | r->len_cq) == 0;
1923}
1924
1925/**
1926 * napi_rx_handler - the NAPI handler for Rx processing
1927 * @dev: the net device
1928 * @budget: how many packets we can process in this round
1929 *
1930 * Handler for new data events when using NAPI.
1931 */
1932static int napi_rx_handler(struct net_device *dev, int *budget)
1933{
1934 struct adapter *adap = dev->priv;
1935 struct sge_qset *qs = dev2qset(dev);
1936 int effective_budget = min(*budget, dev->quota);
1937
1938 int work_done = process_responses(adap, qs, effective_budget);
1939 *budget -= work_done;
1940 dev->quota -= work_done;
1941
1942 if (work_done >= effective_budget)
1943 return 1;
1944
1945 netif_rx_complete(dev);
1946
1947 /*
1948 * Because we don't atomically flush the following write it is
1949 * possible that in very rare cases it can reach the device in a way
1950 * that races with a new response being written plus an error interrupt
1951 * causing the NAPI interrupt handler below to return unhandled status
1952 * to the OS. To protect against this would require flushing the write
1953 * and doing both the write and the flush with interrupts off. Way too
1954 * expensive and unjustifiable given the rarity of the race.
1955 *
1956 * The race cannot happen at all with MSI-X.
1957 */
1958 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
1959 V_NEWTIMER(qs->rspq.next_holdoff) |
1960 V_NEWINDEX(qs->rspq.cidx));
1961 return 0;
1962}
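/*
 * This follows the ->poll contract of this kernel's NAPI: returning 1
 * leaves the device on the poll list because the budget was exhausted,
 * while returning 0 means all work is done, netif_rx_complete() has been
 * called, and the GTS write above re-arms the response queue interrupt.
 * The hook itself is installed in t3_update_qset_coalesce() below.
 */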
1963
1964/*
1965 * Returns true if the device is already scheduled for polling.
1966 */
1967static inline int napi_is_scheduled(struct net_device *dev)
1968{
1969 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
1970}
1971
1972/**
1973 * process_pure_responses - process pure responses from a response queue
1974 * @adap: the adapter
1975 * @qs: the queue set owning the response queue
1976 * @r: the first pure response to process
1977 *
1978 * A simpler version of process_responses() that handles only pure (i.e.,
1979 * non data-carrying) responses. Such responses are too light-weight to
1980 * justify calling a softirq under NAPI, so we handle them specially in
1981 * the interrupt handler. The function is called with a pointer to a
1982 * response, which the caller must ensure is a valid pure response.
1983 *
1984 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
1985 */
1986static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
1987 struct rsp_desc *r)
1988{
1989 struct sge_rspq *q = &qs->rspq;
6195c71d 1990 unsigned int sleeping = 0;
1991
1992 do {
1993 u32 flags = ntohl(r->flags);
1994
1995 r++;
1996 if (unlikely(++q->cidx == q->size)) {
1997 q->cidx = 0;
1998 q->gen ^= 1;
1999 r = q->desc;
2000 }
2001 prefetch(r);
2002
2003 if (flags & RSPD_CTRL_MASK) {
2004 sleeping |= flags & RSPD_GTS_MASK;
6195c71d 2005 handle_rsp_cntrl_info(qs, flags);
2006 }
2007
2008 q->pure_rsps++;
2009 if (++q->credits >= (q->size / 4)) {
2010 refill_rspq(adap, q, q->credits);
2011 q->credits = 0;
2012 }
2013 } while (is_new_response(r, q) && is_pure_response(r));
2014
2015 if (sleeping)
2016 check_ring_db(adap, qs, sleeping);
2017
2018 smp_mb(); /* commit Tx queue .processed updates */
2019 if (unlikely(qs->txq_stopped != 0))
2020 restart_tx(qs);
2021
2022 return is_new_response(r, q);
2023}
2024
2025/**
2026 * handle_responses - decide what to do with new responses in NAPI mode
2027 * @adap: the adapter
2028 * @q: the response queue
2029 *
2030 * This is used by the NAPI interrupt handlers to decide what to do with
2031 * new SGE responses. If there are no new responses it returns -1. If
2032 * there are new responses and they are pure (i.e., non-data carrying)
2033 * it handles them straight in hard interrupt context as they are very
2034 * cheap and don't deliver any packets. Finally, if there are any data
2035 * signaling responses it schedules the NAPI handler. Returns 1 if it
2036 * schedules NAPI, 0 if all new responses were pure.
2037 *
2038 * The caller must ascertain NAPI is not already running.
2039 */
2040static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2041{
2042 struct sge_qset *qs = rspq_to_qset(q);
2043 struct rsp_desc *r = &q->desc[q->cidx];
2044
2045 if (!is_new_response(r, q))
2046 return -1;
2047 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2048 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2049 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2050 return 0;
2051 }
2052 if (likely(__netif_rx_schedule_prep(qs->netdev)))
2053 __netif_rx_schedule(qs->netdev);
2054 return 1;
2055}
2056
2057/*
2058 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2059 * (i.e., response queue serviced in hard interrupt).
2060 */
2061irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2062{
2063 struct sge_qset *qs = cookie;
2064 struct adapter *adap = qs->netdev->priv;
2065 struct sge_rspq *q = &qs->rspq;
2066
2067 spin_lock(&q->lock);
2068 if (process_responses(adap, qs, -1) == 0)
2069 q->unhandled_irqs++;
2070 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2071 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2072 spin_unlock(&q->lock);
2073 return IRQ_HANDLED;
2074}
2075
2076/*
2077 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2078 * (i.e., response queue serviced by NAPI polling).
2079 */
2080irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2081{
2082 struct sge_qset *qs = cookie;
2083 struct adapter *adap = qs->netdev->priv;
2084 struct sge_rspq *q = &qs->rspq;
2085
2086 spin_lock(&q->lock);
2087 BUG_ON(napi_is_scheduled(qs->netdev));
2088
2089 if (handle_responses(adap, q) < 0)
2090 q->unhandled_irqs++;
2091 spin_unlock(&q->lock);
2092 return IRQ_HANDLED;
2093}
2094
2095/*
2096 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2097 * SGE response queues as well as error and other async events as they all use
2098 * the same MSI vector. We use one SGE response queue per port in this mode
2099 * and protect all response queues with queue 0's lock.
2100 */
2101static irqreturn_t t3_intr_msi(int irq, void *cookie)
2102{
2103 int new_packets = 0;
2104 struct adapter *adap = cookie;
2105 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2106
2107 spin_lock(&q->lock);
2108
2109 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2110 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2111 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2112 new_packets = 1;
2113 }
2114
2115 if (adap->params.nports == 2 &&
2116 process_responses(adap, &adap->sge.qs[1], -1)) {
2117 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2118
2119 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2120 V_NEWTIMER(q1->next_holdoff) |
2121 V_NEWINDEX(q1->cidx));
2122 new_packets = 1;
2123 }
2124
2125 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2126 q->unhandled_irqs++;
2127
2128 spin_unlock(&q->lock);
2129 return IRQ_HANDLED;
2130}
2131
2132static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
2133{
2134 if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
2135 if (likely(__netif_rx_schedule_prep(dev)))
2136 __netif_rx_schedule(dev);
2137 return 1;
2138 }
2139 return 0;
2140}
2141
2142/*
2143 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2144 * by NAPI polling). Handles data events from SGE response queues as well as
2145 * error and other async events as they all use the same MSI vector. We use
2146 * one SGE response queue per port in this mode and protect all response
2147 * queues with queue 0's lock.
2148 */
2149irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2150{
2151 int new_packets;
2152 struct adapter *adap = cookie;
2153 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2154
2155 spin_lock(&q->lock);
2156
2157 new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
2158 if (adap->params.nports == 2)
2159 new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
2160 &adap->sge.qs[1].rspq);
2161 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2162 q->unhandled_irqs++;
2163
2164 spin_unlock(&q->lock);
2165 return IRQ_HANDLED;
2166}
2167
2168/*
2169 * A helper function that processes responses and issues GTS.
2170 */
2171static inline int process_responses_gts(struct adapter *adap,
2172 struct sge_rspq *rq)
2173{
2174 int work;
2175
2176 work = process_responses(adap, rspq_to_qset(rq), -1);
2177 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2178 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2179 return work;
2180}
2181
2182/*
2183 * The legacy INTx interrupt handler. This needs to handle data events from
2184 * SGE response queues as well as error and other async events as they all use
2185 * the same interrupt pin. We use one SGE response queue per port in this mode
2186 * and protect all response queues with queue 0's lock.
2187 */
2188static irqreturn_t t3_intr(int irq, void *cookie)
2189{
2190 int work_done, w0, w1;
2191 struct adapter *adap = cookie;
2192 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2193 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2194
2195 spin_lock(&q0->lock);
2196
2197 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2198 w1 = adap->params.nports == 2 &&
2199 is_new_response(&q1->desc[q1->cidx], q1);
2200
2201 if (likely(w0 | w1)) {
2202 t3_write_reg(adap, A_PL_CLI, 0);
2203 t3_read_reg(adap, A_PL_CLI); /* flush */
2204
2205 if (likely(w0))
2206 process_responses_gts(adap, q0);
2207
2208 if (w1)
2209 process_responses_gts(adap, q1);
2210
2211 work_done = w0 | w1;
2212 } else
2213 work_done = t3_slow_intr_handler(adap);
2214
2215 spin_unlock(&q0->lock);
2216 return IRQ_RETVAL(work_done != 0);
2217}
2218
2219/*
2220 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2221 * Handles data events from SGE response queues as well as error and other
2222 * async events as they all use the same interrupt pin. We use one SGE
2223 * response queue per port in this mode and protect all response queues with
2224 * queue 0's lock.
2225 */
2226static irqreturn_t t3b_intr(int irq, void *cookie)
2227{
2228 u32 map;
2229 struct adapter *adap = cookie;
2230 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2231
2232 t3_write_reg(adap, A_PL_CLI, 0);
2233 map = t3_read_reg(adap, A_SG_DATA_INTR);
2234
2235 if (unlikely(!map)) /* shared interrupt, most likely */
2236 return IRQ_NONE;
2237
2238 spin_lock(&q0->lock);
2239
2240 if (unlikely(map & F_ERRINTR))
2241 t3_slow_intr_handler(adap);
2242
2243 if (likely(map & 1))
2244 process_responses_gts(adap, q0);
2245
2246 if (map & 2)
2247 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2248
2249 spin_unlock(&q0->lock);
2250 return IRQ_HANDLED;
2251}
2252
2253/*
2254 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2255 * Handles data events from SGE response queues as well as error and other
2256 * async events as they all use the same interrupt pin. We use one SGE
2257 * response queue per port in this mode and protect all response queues with
2258 * queue 0's lock.
2259 */
2260static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2261{
2262 u32 map;
2263 struct net_device *dev;
2264 struct adapter *adap = cookie;
2265 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2266
2267 t3_write_reg(adap, A_PL_CLI, 0);
2268 map = t3_read_reg(adap, A_SG_DATA_INTR);
2269
2270 if (unlikely(!map)) /* shared interrupt, most likely */
2271 return IRQ_NONE;
2272
2273 spin_lock(&q0->lock);
2274
2275 if (unlikely(map & F_ERRINTR))
2276 t3_slow_intr_handler(adap);
2277
2278 if (likely(map & 1)) {
2279 dev = adap->sge.qs[0].netdev;
2280
2281 if (likely(__netif_rx_schedule_prep(dev)))
2282 __netif_rx_schedule(dev);
2283 }
2284 if (map & 2) {
2285 dev = adap->sge.qs[1].netdev;
2286
2287 if (likely(__netif_rx_schedule_prep(dev)))
2288 __netif_rx_schedule(dev);
2289 }
2290
2291 spin_unlock(&q0->lock);
2292 return IRQ_HANDLED;
2293}
2294
2295/**
2296 * t3_intr_handler - select the top-level interrupt handler
2297 * @adap: the adapter
2298 * @polling: whether using NAPI to service response queues
2299 *
2300 * Selects the top-level interrupt handler based on the type of interrupts
2301 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2302 * response queues.
2303 */
2304intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
2305{
2306 if (adap->flags & USING_MSIX)
2307 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2308 if (adap->flags & USING_MSI)
2309 return polling ? t3_intr_msi_napi : t3_intr_msi;
2310 if (adap->params.rev > 0)
2311 return polling ? t3b_intr_napi : t3b_intr;
2312 return t3_intr;
2313}
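/*
 * Example (illustration only, not taken from the driver): the selected
 * handler would typically be registered with request_irq(), e.g.
 *
 *	err = request_irq(irq, t3_intr_handler(adap, rspq_polling),
 *			  flags, name, cookie);
 *
 * where irq, rspq_polling, flags, name and cookie are placeholders for the
 * caller's state.  The cookie must match what the chosen handler expects:
 * a struct sge_qset for the MSI-X handlers above, the adapter otherwise.
 */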
2314
2315/**
2316 * t3_sge_err_intr_handler - SGE async event interrupt handler
2317 * @adapter: the adapter
2318 *
2319 * Interrupt handler for SGE asynchronous (non-data) events.
2320 */
2321void t3_sge_err_intr_handler(struct adapter *adapter)
2322{
2323 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2324
2325 if (status & F_RSPQCREDITOVERFOW)
2326 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2327
2328 if (status & F_RSPQDISABLED) {
2329 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2330
2331 CH_ALERT(adapter,
2332 "packet delivered to disabled response queue "
2333 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2334 }
2335
2336 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2337 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
2338 t3_fatal_err(adapter);
2339}
2340
2341/**
2342 * sge_timer_cb - perform periodic maintenance of an SGE qset
2343 * @data: the SGE queue set to maintain
2344 *
2345 * Runs periodically from a timer to perform maintenance of an SGE queue
2346 * set. It performs two tasks:
2347 *
2348 * a) Cleans up any completed Tx descriptors that may still be pending.
2349 * Normal descriptor cleanup happens when new packets are added to a Tx
2350 * queue so this timer is relatively infrequent and does any cleanup only
2351 * if the Tx queue has not seen any new packets in a while. We make a
2352 * best effort attempt to reclaim descriptors, in that we don't wait
2353 * around if we cannot get a queue's lock (which most likely is because
2354 * someone else is queueing new packets and so will also handle the clean
2355 * up). Since control queues use immediate data exclusively we don't
2356 * bother cleaning them up here.
2357 *
2358 * b) Replenishes Rx queues that have run out due to memory shortage.
2359 * Normally new Rx buffers are added when existing ones are consumed but
2360 * when out of memory a queue can become empty. We try to add only a few
2361 * buffers here, the queue will be replenished fully as these new buffers
2362 * are used up if memory shortage has subsided.
2363 */
2364static void sge_timer_cb(unsigned long data)
2365{
2366 spinlock_t *lock;
2367 struct sge_qset *qs = (struct sge_qset *)data;
2368 struct adapter *adap = qs->netdev->priv;
2369
2370 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2371 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2372 spin_unlock(&qs->txq[TXQ_ETH].lock);
2373 }
2374 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2375 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2376 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2377 }
2378 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2379 &adap->sge.qs[0].rspq.lock;
2380 if (spin_trylock_irq(lock)) {
2381 if (!napi_is_scheduled(qs->netdev)) {
2382 if (qs->fl[0].credits < qs->fl[0].size)
2383 __refill_fl(adap, &qs->fl[0]);
2384 if (qs->fl[1].credits < qs->fl[1].size)
2385 __refill_fl(adap, &qs->fl[1]);
2386 }
2387 spin_unlock_irq(lock);
2388 }
2389 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2390}
2391
2392/**
2393 * t3_update_qset_coalesce - update coalescing settings for a queue set
2394 * @qs: the SGE queue set
2395 * @p: new queue set parameters
2396 *
2397 * Update the coalescing settings for an SGE queue set. Nothing is done
2398 * if the queue set is not initialized yet.
2399 */
2400void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2401{
2402 if (!qs->netdev)
2403 return;
2404
2405 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2406 qs->rspq.polling = p->polling;
2407 qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
2408}
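/*
 * The holdoff timer is expressed in 0.1us units (see the A_SG_TIMER_TICK
 * setup in t3_sge_init()), hence the multiplication by 10: a coalesce_usecs
 * of 5 becomes a holdoff value of 50, and 0 is clamped to 1 since the
 * value can't be 0.
 */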
2409
2410/**
2411 * t3_sge_alloc_qset - initialize an SGE queue set
2412 * @adapter: the adapter
2413 * @id: the queue set id
2414 * @nports: how many Ethernet ports will be using this queue set
2415 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2416 * @p: configuration parameters for this queue set
2417 * @ntxq: number of Tx queues for the queue set
2418 * @netdev: net device associated with this queue set
2419 *
2420 * Allocate resources and initialize an SGE queue set. A queue set
2421 * comprises a response queue, two Rx free-buffer queues, and up to 3
2422 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2423 * queue, offload queue, and control queue.
2424 */
2425int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2426 int irq_vec_idx, const struct qset_params *p,
2427 int ntxq, struct net_device *netdev)
2428{
2429 int i, ret = -ENOMEM;
2430 struct sge_qset *q = &adapter->sge.qs[id];
2431
2432 init_qset_cntxt(q, id);
2433 init_timer(&q->tx_reclaim_timer);
2434 q->tx_reclaim_timer.data = (unsigned long)q;
2435 q->tx_reclaim_timer.function = sge_timer_cb;
2436
2437 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2438 sizeof(struct rx_desc),
2439 sizeof(struct rx_sw_desc),
2440 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2441 if (!q->fl[0].desc)
2442 goto err;
2443
2444 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2445 sizeof(struct rx_desc),
2446 sizeof(struct rx_sw_desc),
2447 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2448 if (!q->fl[1].desc)
2449 goto err;
2450
2451 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2452 sizeof(struct rsp_desc), 0,
2453 &q->rspq.phys_addr, NULL);
2454 if (!q->rspq.desc)
2455 goto err;
2456
2457 for (i = 0; i < ntxq; ++i) {
2458 /*
2459 * The control queue always uses immediate data so does not
2460 * need to keep track of any sk_buffs.
2461 */
2462 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2463
2464 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2465 sizeof(struct tx_desc), sz,
2466 &q->txq[i].phys_addr,
2467 &q->txq[i].sdesc);
2468 if (!q->txq[i].desc)
2469 goto err;
2470
2471 q->txq[i].gen = 1;
2472 q->txq[i].size = p->txq_size[i];
2473 spin_lock_init(&q->txq[i].lock);
2474 skb_queue_head_init(&q->txq[i].sendq);
2475 }
2476
2477 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2478 (unsigned long)q);
2479 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2480 (unsigned long)q);
2481
2482 q->fl[0].gen = q->fl[1].gen = 1;
2483 q->fl[0].size = p->fl_size;
2484 q->fl[1].size = p->jumbo_size;
2485
2486 q->rspq.gen = 1;
2487 q->rspq.size = p->rspq_size;
2488 spin_lock_init(&q->rspq.lock);
2489
2490 q->txq[TXQ_ETH].stop_thres = nports *
2491 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2492
2493 if (ntxq == 1) {
2494 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
2495 sizeof(struct cpl_rx_pkt);
2496 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
2497 sizeof(struct cpl_rx_pkt);
2498 } else {
2499 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
2500 sizeof(struct cpl_rx_data);
2501 q->fl[1].buf_size = (16 * 1024) -
2502 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2503 }
2504
2505 spin_lock(&adapter->sge.reg_lock);
2506
2507 /* FL threshold comparison uses < */
2508 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2509 q->rspq.phys_addr, q->rspq.size,
2510 q->fl[0].buf_size, 1, 0);
2511 if (ret)
2512 goto err_unlock;
2513
2514 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2515 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2516 q->fl[i].phys_addr, q->fl[i].size,
2517 q->fl[i].buf_size, p->cong_thres, 1,
2518 0);
2519 if (ret)
2520 goto err_unlock;
2521 }
2522
2523 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2524 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2525 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2526 1, 0);
2527 if (ret)
2528 goto err_unlock;
2529
2530 if (ntxq > 1) {
2531 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2532 USE_GTS, SGE_CNTXT_OFLD, id,
2533 q->txq[TXQ_OFLD].phys_addr,
2534 q->txq[TXQ_OFLD].size, 0, 1, 0);
2535 if (ret)
2536 goto err_unlock;
2537 }
2538
2539 if (ntxq > 2) {
2540 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2541 SGE_CNTXT_CTRL, id,
2542 q->txq[TXQ_CTRL].phys_addr,
2543 q->txq[TXQ_CTRL].size,
2544 q->txq[TXQ_CTRL].token, 1, 0);
2545 if (ret)
2546 goto err_unlock;
2547 }
2548
2549 spin_unlock(&adapter->sge.reg_lock);
2550 q->netdev = netdev;
2551 t3_update_qset_coalesce(q, p);
2552
2553 /*
2554 * We use atalk_ptr as a backpointer to a qset. In case a device is
2555 * associated with multiple queue sets only the first one sets
2556 * atalk_ptr.
2557 */
2558 if (netdev->atalk_ptr == NULL)
2559 netdev->atalk_ptr = q;
2560
2561 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2562 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2563 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2564
2565 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2566 V_NEWTIMER(q->rspq.holdoff_tmr));
2567
2568 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2569 return 0;
2570
2571 err_unlock:
2572 spin_unlock(&adapter->sge.reg_lock);
2573 err:
2574 t3_free_qset(adapter, q);
2575 return ret;
2576}
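/*
 * Example (illustration only, hypothetical names): the driver core is
 * expected to allocate one queue set per ring, roughly as in
 *
 *	err = t3_sge_alloc_qset(adap, qset_idx, nports, irq_vec_idx,
 *				qset_cfg, ntxq, dev);
 *
 * where qset_cfg points to the caller's struct qset_params and qset_idx,
 * nports, irq_vec_idx, ntxq and dev stand in for its configuration,
 * followed by t3_sge_start() once all queue sets are set up, since DMA is
 * not enabled here.
 */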
2577
2578/**
2579 * t3_free_sge_resources - free SGE resources
2580 * @adap: the adapter
2581 *
2582 * Frees resources used by the SGE queue sets.
2583 */
2584void t3_free_sge_resources(struct adapter *adap)
2585{
2586 int i;
2587
2588 for (i = 0; i < SGE_QSETS; ++i)
2589 t3_free_qset(adap, &adap->sge.qs[i]);
2590}
2591
2592/**
2593 * t3_sge_start - enable SGE
2594 * @adap: the adapter
2595 *
2596 * Enables the SGE for DMAs. This is the last step in starting packet
2597 * transfers.
2598 */
2599void t3_sge_start(struct adapter *adap)
2600{
2601 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2602}
2603
2604/**
2605 * t3_sge_stop - disable SGE operation
2606 * @adap: the adapter
2607 *
2608 * Disables the DMA engine. This can be called in emergencies (e.g.,
2609 * from error interrupts) or from normal process context. In the latter
2610 * case it also disables any pending queue restart tasklets. Note that
2611 * if it is called in interrupt context it cannot disable the restart
2612 * tasklets as it cannot wait, however the tasklets will have no effect
2613 * since the doorbells are disabled and the driver will call this again
2614 * later from process context, at which time the tasklets will be stopped
2615 * if they are still running.
2616 */
2617void t3_sge_stop(struct adapter *adap)
2618{
2619 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2620 if (!in_interrupt()) {
2621 int i;
2622
2623 for (i = 0; i < SGE_QSETS; ++i) {
2624 struct sge_qset *qs = &adap->sge.qs[i];
2625
2626 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2627 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2628 }
2629 }
2630}
2631
2632/**
2633 * t3_sge_init - initialize SGE
2634 * @adap: the adapter
2635 * @p: the SGE parameters
2636 *
2637 * Performs SGE initialization needed every time after a chip reset.
2638 * We do not initialize any of the queue sets here, instead the driver
2639 * top-level must request those individually. We also do not enable DMA
2640 * here, that should be done after the queues have been set up.
2641 */
2642void t3_sge_init(struct adapter *adap, struct sge_params *p)
2643{
2644 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
2645
2646 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2647 F_CQCRDTCTRL |
2648 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2649 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2650#if SGE_NUM_GENBITS == 1
2651 ctrl |= F_EGRGENCTRL;
2652#endif
2653 if (adap->params.rev > 0) {
2654 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2655 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2656 ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
2657 }
2658 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2659 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2660 V_LORCQDRBTHRSH(512));
2661 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2662 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
6195c71d 2663 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
2664 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
2665 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2666 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2667 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2668 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2669 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2670}
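/*
 * A_SG_TIMER_TICK is programmed above to core_ticks_per_usec(adap) / 10,
 * i.e. one SGE timer tick every 0.1us.  This is the unit used by the
 * interrupt holdoff values elsewhere in this file (NOMEM_INTR_DELAY,
 * holdoff_tmr).
 */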
2671
2672/**
2673 * t3_sge_prep - one-time SGE initialization
2674 * @adap: the associated adapter
2675 * @p: SGE parameters
2676 *
2677 * Performs one-time initialization of SGE SW state. Includes determining
2678 * defaults for the assorted SGE parameters, which admins can change until
2679 * they are used to initialize the SGE.
2680 */
2681void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2682{
2683 int i;
2684
2685 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2686 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2687
2688 for (i = 0; i < SGE_QSETS; ++i) {
2689 struct qset_params *q = p->qset + i;
2690
2691 q->polling = adap->params.rev > 0;
2692 q->coalesce_usecs = 5;
2693 q->rspq_size = 1024;
2694 q->fl_size = 4096;
2695 q->jumbo_size = 512;
2696 q->txq_size[TXQ_ETH] = 1024;
2697 q->txq_size[TXQ_OFLD] = 1024;
2698 q->txq_size[TXQ_CTRL] = 256;
2699 q->cong_thres = 0;
2700 }
2701
2702 spin_lock_init(&adap->sge.reg_lock);
2703}
2704
2705/**
2706 * t3_get_desc - dump an SGE descriptor for debugging purposes
2707 * @qs: the queue set
2708 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
2709 * @idx: the descriptor index in the queue
2710 * @data: where to dump the descriptor contents
2711 *
2712 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2713 * size of the descriptor.
2714 */
2715int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2716 unsigned char *data)
2717{
2718 if (qnum >= 6)
2719 return -EINVAL;
2720
2721 if (qnum < 3) {
2722 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2723 return -EINVAL;
2724 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2725 return sizeof(struct tx_desc);
2726 }
2727
2728 if (qnum == 3) {
2729 if (!qs->rspq.desc || idx >= qs->rspq.size)
2730 return -EINVAL;
2731 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2732 return sizeof(struct rsp_desc);
2733 }
2734
2735 qnum -= 4;
2736 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2737 return -EINVAL;
2738 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2739 return sizeof(struct rx_desc);
2740}
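/*
 * Example use (illustration only): dump response descriptor 0 of queue
 * set 0 into a caller-provided buffer.  The buffer must be large enough for
 * the descriptor type implied by qnum (a struct rsp_desc here, since
 * qnum 3 selects the response queue):
 *
 *	unsigned char buf[sizeof(struct rsp_desc)];
 *	int len = t3_get_desc(&adap->sge.qs[0], 3, 0, buf);
 *
 * A negative return indicates an invalid queue number or index.
 */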