/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <linux/cache.h>

#include "t4_values.h"
#include "csio_hw.h"
#include "csio_wr.h"
#include "csio_mb.h"
#include "csio_defs.h"
int csio_intr_coalesce_cnt;		/* value:SGE_INGRESS_RX_THRESHOLD[0] */
static int csio_sge_thresh_reg;		/* SGE_INGRESS_RX_THRESHOLD[0] */

int csio_intr_coalesce_time = 10;	/* value:SGE_TIMER_VALUE_1 */
static int csio_sge_timer_reg = 1;	/* SGE_TIMER_VALUE_1 */
#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val)				\
	csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg##_A)
/* Read and cache the FL buffer size programmed in size register @reg */
static void
csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
{
	sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0_A +
							reg * sizeof(uint32_t));
}
/* Free list buffer size */
static inline uint32_t
csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
{
	/* The low nibble of the DMA address holds the size register index */
	return sge->sge_fl_buf_size[buf->paddr & 0xF];
}
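/*
 * Illustrative example (a sketch, not driver code): csio_wr_fill_fl()
 * below ORs the size-register index into the low bits of each posted
 * DMA address. So a buffer at bus address 0x1000 posted against size
 * register 2 is written to the freelist as 0x1002, and this helper
 * recovers index 2 from the low nibble to look up sge_fl_buf_size[2].
 */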
/* Size of the egress queue status page */
static inline uint32_t
csio_wr_qstat_pgsz(struct csio_hw *hw)
{
	return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
}
/* Ring freelist doorbell */
static inline void
csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
{
	/*
	 * Ring the doorbell only when we have at least CSIO_QCREDIT_SZ
	 * number of bytes in the freelist queue. This translates to at least
	 * 8 freelist buffer pointers (since each pointer is 8 bytes).
	 */
	if (flq->inc_idx >= 8) {
		csio_wr_reg32(hw, DBPRIO_F | QID_V(flq->un.fl.flid) |
				  PIDX_T5_V(flq->inc_idx / 8) | DBTYPE_F,
				  MYPF_REG(SGE_PF_KDOORBELL_A));
		flq->inc_idx &= 7;
	}
}
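/*
 * Worked example (illustrative): with inc_idx == 13 pending buffer
 * pointers, the doorbell write above advances the hardware PIDX by
 * 13 / 8 = 1 credit; the remaining 13 & 7 = 5 pointers stay counted
 * in inc_idx until a later ring accumulates a full credit.
 */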
/* Write a 0 cidx increment value to enable SGE interrupts for this queue */
static void
csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
{
	csio_wr_reg32(hw, CIDXINC_V(0)		|
			  INGRESSQID_V(iqid)	|
			  TIMERREG_V(X_TIMERREG_RESTART_COUNTER),
			  MYPF_REG(SGE_PF_GTS_A));
}
/*
 * csio_wr_fill_fl - Populate the FL buffers of a FL queue.
 * @hw: HW module.
 * @flq: Freelist queue.
 *
 * Fill up freelist buffer entries with buffers of size specified
 * in the size register.
 */
static int
csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	__be64 *d = (__be64 *)(flq->vstart);
	struct csio_dma_buf *buf = &flq->un.fl.bufs[0];
	uint64_t paddr;
	int sreg = flq->un.fl.sreg;
	int n = flq->credits;

	while (n--) {
		buf->len = sge->sge_fl_buf_size[sreg];
		buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, buf->len,
						&buf->paddr, GFP_KERNEL);
		if (!buf->vaddr) {
			csio_err(hw, "Could only fill %d buffers!\n", n + 1);
			return -ENOMEM;
		}

		paddr = buf->paddr | (sreg & 0xF);

		*d++ = cpu_to_be64(paddr);
		buf++;
	}

	return 0;
}
/*
 * csio_wr_update_fl - Advance the freelist producer index.
 * @hw: HW module.
 * @flq: Freelist queue.
 *
 * Advance pidx/inc_idx by @n refilled buffers, wrapping pidx at the
 * queue size.
 */
static inline void
csio_wr_update_fl(struct csio_hw *hw, struct csio_q *flq, uint16_t n)
{
	flq->inc_idx += n;
	flq->pidx += n;
	if (unlikely(flq->pidx >= flq->credits))
		flq->pidx -= (uint16_t)flq->credits;

	CSIO_INC_STATS(flq, n_flq_refill);
}
/*
 * csio_wr_alloc_q - Allocate a WR queue and initialize it.
 * @hw: HW module
 * @qsize: Size of the queue in bytes
 * @wrsize: Size of WR in this queue, if fixed.
 * @type: Type of queue (Ingress/Egress/Freelist)
 * @owner: Module that owns this queue.
 * @nflb: Number of freelist buffers for FL.
 * @sreg: What is the FL buffer size register?
 * @iq_int_handler: Ingress queue handler in INTx mode.
 *
 * This function allocates and sets up a queue for the caller
 * of size qsize, aligned at the required boundary. This is subject to
 * free entries being available in the queue array. If one is found,
 * it is initialized with the allocated queue, marked as being used (owner),
 * and a handle returned to the caller in the form of the queue's index
 * into the q_arr array.
 * If the caller has indicated a freelist (by specifying nflb > 0), create
 * another queue (with its own index into q_arr) for the freelist. Allocate
 * memory for DMA buffer metadata (vaddr, len etc). Save off the freelist
 * idx in the ingress queue's flq.idx. This is how a freelist is associated
 * with its owning ingress queue.
 */
int
csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
		uint16_t type, void *owner, uint32_t nflb, int sreg,
		iq_handler_t iq_intx_handler)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q	*q, *flq;
	int		free_idx = wrm->free_qidx;
	int		ret_idx = free_idx;
	uint32_t	qsz;
	int		flq_idx;

	if (free_idx >= wrm->num_q) {
		csio_err(hw, "No more free queues.\n");
		return -1;
	}

	switch (type) {
	case CSIO_EGRESS:
		qsz = ALIGN(qsize, CSIO_QCREDIT_SZ) + csio_wr_qstat_pgsz(hw);
		break;
	case CSIO_INGRESS:
		switch (wrsize) {
		case 16:
		case 32:
		case 64:
		case 128:
			break;
		default:
			csio_err(hw, "Invalid Ingress queue WR size:%d\n",
				    wrsize);
			return -1;
		}

		/*
		 * Number of elements must be a multiple of 16
		 * So this includes status page size
		 */
		qsz = ALIGN(qsize/wrsize, 16) * wrsize;
		break;
	case CSIO_FREELIST:
		qsz = ALIGN(qsize/wrsize, 8) * wrsize + csio_wr_qstat_pgsz(hw);
		break;
	default:
		csio_err(hw, "Invalid queue type: 0x%x\n", type);
		return -1;
	}
	q = wrm->q_arr[free_idx];

	q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
				       GFP_KERNEL);
	if (!q->vstart) {
		csio_err(hw,
			 "Failed to allocate DMA memory for "
			 "queue at id: %d size: %d\n", free_idx, qsize);
		return -1;
	}

	q->type		= type;
	q->owner	= owner;
	q->pidx		= q->cidx = q->inc_idx = 0;
	q->size		= qsz;
	q->wr_sz	= wrsize;	/* If using fixed size WRs */

	wrm->free_qidx++;
	if (type == CSIO_INGRESS) {
		/* Since queue area is set to zero */
		q->un.iq.genbit	= 1;

		/*
		 * Ingress queue status page size is always the size of
		 * the ingress queue entry.
		 */
		q->credits	= (qsz - q->wr_sz) / q->wr_sz;
		q->vwrap	= (void *)((uintptr_t)(q->vstart) + qsz
							- q->wr_sz);

		/* Allocate memory for FL if requested */
		if (nflb > 0) {
			flq_idx = csio_wr_alloc_q(hw, nflb * sizeof(__be64),
						  sizeof(__be64), CSIO_FREELIST,
						  owner, 0, sreg, NULL);
			if (flq_idx == -1) {
				csio_err(hw,
					 "Failed to allocate FL queue"
					 " for IQ idx:%d\n", free_idx);
				return -1;
			}

			/* Associate the new FL with the Ingress queue */
			q->un.iq.flq_idx = flq_idx;

			flq = wrm->q_arr[q->un.iq.flq_idx];
			flq->un.fl.bufs = kcalloc(flq->credits,
						  sizeof(struct csio_dma_buf),
						  GFP_KERNEL);
			if (!flq->un.fl.bufs) {
				csio_err(hw,
					 "Failed to allocate FL queue bufs"
					 " for IQ idx:%d\n", free_idx);
				return -1;
			}

			flq->un.fl.packen = 0;
			flq->un.fl.offset = 0;
			flq->un.fl.sreg = sreg;

			/* Fill up the free list buffers */
			if (csio_wr_fill_fl(hw, flq))
				return -1;

			/*
			 * Make sure that in a FLQ, at least 1 credit (8 FL
			 * buffers) remains unpopulated, otherwise HW thinks
			 * the FLQ is empty.
			 */
			flq->pidx = flq->inc_idx = flq->credits - 8;
		} else {
			q->un.iq.flq_idx = -1;
		}

		/* Associate the IQ INTx handler. */
		q->un.iq.iq_intx_handler = iq_intx_handler;

		csio_q_iqid(hw, ret_idx) = CSIO_MAX_QID;
	} else if (type == CSIO_EGRESS) {
		q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ;
		q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
						- csio_wr_qstat_pgsz(hw));
		csio_q_eqid(hw, ret_idx) = CSIO_MAX_QID;
	} else { /* Freelist */
		q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64);
		q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
						- csio_wr_qstat_pgsz(hw));
		csio_q_flid(hw, ret_idx) = CSIO_MAX_QID;
	}

	return ret_idx;
}
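/*
 * Usage sketch (hypothetical caller, not part of this file): allocate
 * an ingress queue of 32 fixed-size 64-byte WRs with a 64-buffer
 * freelist attached; 'my_intx_handler' is an assumed INTx handler.
 *
 *	int iq_idx = csio_wr_alloc_q(hw, 32 * 64, 64, CSIO_INGRESS,
 *				     owner, 64, 0, my_intx_handler);
 *	if (iq_idx == -1)
 *		return -ENOMEM;
 */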
/*
 * csio_wr_iq_create_rsp - Response handler for IQ creation.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @iq_idx: Ingress queue that got created.
 *
 * Handle FW_IQ_CMD mailbox completion. Save off the assigned IQ/FL ids.
 */
static int
csio_wr_iq_create_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
{
	struct csio_iq_params iqp;
	enum fw_retval retval;
	uint32_t iq_id;
	int flq_idx;

	memset(&iqp, 0, sizeof(struct csio_iq_params));
	csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp);

	if (retval != FW_SUCCESS) {
		csio_err(hw, "IQ cmd returned 0x%x!\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_q_iqid(hw, iq_idx)		= iqp.iqid;
	csio_q_physiqid(hw, iq_idx)	= iqp.physiqid;
	csio_q_pidx(hw, iq_idx)		= csio_q_cidx(hw, iq_idx) = 0;
	csio_q_inc_idx(hw, iq_idx)	= 0;
	iq_id = iqp.iqid - hw->wrm.fw_iq_start;

	/* Set the iq-id to iq map table. */
	if (iq_id >= CSIO_MAX_IQ) {
		csio_err(hw,
			 "Exceeding MAX_IQ(%d) supported!"
			 " iqid:%d rel_iqid:%d FW iq_start:%d\n",
			 CSIO_MAX_IQ, iq_id, iqp.iqid, hw->wrm.fw_iq_start);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}
	csio_q_set_intr_map(hw, iq_idx, iq_id);
	/*
	 * During FW_IQ_CMD, FW sets interrupt_sent bit to 1 in the SGE
	 * ingress context of this queue. This will block interrupts to
	 * this queue until the next GTS write. Therefore, we do a
	 * 0-cidx increment GTS write for this queue just to clear the
	 * interrupt_sent bit. This will re-enable interrupts to this
	 * queue after the next GTS write.
	 */
	csio_wr_sge_intr_enable(hw, iqp.physiqid);
	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1) {
		struct csio_q *flq = hw->wrm.q_arr[flq_idx];

		csio_q_flid(hw, flq_idx) = iqp.fl0id;
		csio_q_cidx(hw, flq_idx) = 0;
		csio_q_pidx(hw, flq_idx)    = csio_q_credits(hw, flq_idx) - 8;
		csio_q_inc_idx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;

		/* Now update SGE about the buffers allocated during init */
		csio_wr_ring_fldb(hw, flq);
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_wr_iq_create - Configure an Ingress queue with FW.
 * @hw: The HW module.
 * @priv: Private data object.
 * @iq_idx: Ingress queue index in the WR module.
 * @vec: MSIX vector.
 * @portid: PCIE Channel to be associated with this queue.
 * @async: Is this a FW asynchronous message handling queue?
 * @cbfn: Completion callback.
 *
 * This API configures an ingress queue with FW by issuing a FW_IQ_CMD mailbox
 * command with the alloc/write bits set.
 */
int
csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx,
		  uint32_t vec, uint8_t portid, bool async,
		  void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_mb *mbp;
	struct csio_iq_params iqp;
	int flq_idx;

	memset(&iqp, 0, sizeof(struct csio_iq_params));
	csio_q_portid(hw, iq_idx) = portid;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "IQ command out of memory!\n");
		return -ENOMEM;
	}
	switch (hw->intr_mode) {
	case CSIO_IM_INTX:
	case CSIO_IM_MSI:
		/* For interrupt forwarding queue only */
		if (hw->intr_iq_idx == iq_idx)
			iqp.iqandst	= X_INTERRUPTDESTINATION_PCIE;
		else
			iqp.iqandst	= X_INTERRUPTDESTINATION_IQ;
		iqp.iqandstindex	=
			csio_q_physiqid(hw, hw->intr_iq_idx);
		break;
	case CSIO_IM_MSIX:
		iqp.iqandst	= X_INTERRUPTDESTINATION_PCIE;
		iqp.iqandstindex = (uint16_t)vec;
		break;
	case CSIO_IM_NONE:
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}
	/* Pass in the ingress queue cmd parameters */
	iqp.pfn			= hw->pfn;
	iqp.vfn			= 0;
	iqp.iq_start		= 1;
	iqp.viid		= 0;
	iqp.type		= FW_IQ_TYPE_FL_INT_CAP;
	iqp.iqasynch		= async;
	if (csio_intr_coalesce_cnt)
		iqp.iqanus	= X_UPDATESCHEDULING_COUNTER_OPTTIMER;
	else
		iqp.iqanus	= X_UPDATESCHEDULING_TIMER;
	iqp.iqanud		= X_UPDATEDELIVERY_INTERRUPT;
	iqp.iqpciech		= portid;
	iqp.iqintcntthresh	= (uint8_t)csio_sge_thresh_reg;

	switch (csio_q_wr_sz(hw, iq_idx)) {
	case 16:
		iqp.iqesize = 0; break;
	case 32:
		iqp.iqesize = 1; break;
	case 64:
		iqp.iqesize = 2; break;
	case 128:
		iqp.iqesize = 3; break;
	}

	iqp.iqsize	= csio_q_size(hw, iq_idx) /
				csio_q_wr_sz(hw, iq_idx);
	iqp.iqaddr	= csio_q_pstart(hw, iq_idx);
	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1) {
		enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id);
		struct csio_q *flq = hw->wrm.q_arr[flq_idx];

		iqp.fl0paden	= 1;
		iqp.fl0packen	= flq->un.fl.packen ? 1 : 0;
		iqp.fl0fbmin	= X_FETCHBURSTMIN_64B;
		iqp.fl0fbmax	= ((chip == CHELSIO_T5) ?
				  X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B);
		iqp.fl0size	= csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ;
		iqp.fl0addr	= csio_q_pstart(hw, flq_idx);
	}
	csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of IQ cmd failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_iq_create_rsp(hw, mbp, iq_idx);
}
/*
 * csio_wr_eq_cfg_rsp - Response handler for EQ creation/configuration.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @eq_idx: Egress queue that got created.
 *
 * Handle FW_EQ_OFLD_CMD mailbox completion. Save off the assigned EQ ids.
 */
static int
csio_wr_eq_cfg_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
{
	struct csio_eq_params eqp;
	enum fw_retval retval;

	memset(&eqp, 0, sizeof(struct csio_eq_params));
	csio_mb_eq_ofld_alloc_write_rsp(hw, mbp, &retval, &eqp);

	if (retval != FW_SUCCESS) {
		csio_err(hw, "EQ OFLD cmd returned 0x%x!\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_q_eqid(hw, eq_idx)	= (uint16_t)eqp.eqid;
	csio_q_physeqid(hw, eq_idx) = (uint16_t)eqp.physeqid;
	csio_q_pidx(hw, eq_idx)	= csio_q_cidx(hw, eq_idx) = 0;
	csio_q_inc_idx(hw, eq_idx) = 0;

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
/*
 * csio_wr_eq_create - Configure an Egress queue with FW.
 * @hw: HW module.
 * @priv: Private data.
 * @eq_idx: Egress queue index in the WR module.
 * @iq_idx: Associated ingress queue index.
 * @portid: PCIE Channel to be associated with this queue.
 * @cbfn: Completion callback.
 *
 * This API configures an offload egress queue with FW by issuing a
 * FW_EQ_OFLD_CMD (with alloc + write) mailbox.
 */
int
csio_wr_eq_create(struct csio_hw *hw, void *priv, int eq_idx,
		  int iq_idx, uint8_t portid,
		  void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_mb *mbp;
	struct csio_eq_params eqp;

	memset(&eqp, 0, sizeof(struct csio_eq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "EQ command out of memory!\n");
		return -ENOMEM;
	}
	eqp.pfn			= hw->pfn;
	eqp.vfn			= 0;
	eqp.eqstart		= 1;
	eqp.hostfcmode		= X_HOSTFCMODE_STATUS_PAGE;
	eqp.iqid		= csio_q_iqid(hw, iq_idx);
	eqp.fbmin		= X_FETCHBURSTMIN_64B;
	eqp.fbmax		= X_FETCHBURSTMAX_512B;
	eqp.cidxfthresh		= 0;
	eqp.pciechn		= portid;
	eqp.eqsize		= csio_q_size(hw, eq_idx) / CSIO_QCREDIT_SZ;
	eqp.eqaddr		= csio_q_pstart(hw, eq_idx);
	csio_mb_eq_ofld_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
				    &eqp, cbfn);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of EQ OFLD cmd failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_eq_cfg_rsp(hw, mbp, eq_idx);
}
/*
 * csio_wr_iq_destroy_rsp - Response handler for IQ removal.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @iq_idx: Ingress queue that was freed.
 *
 * Handle FW_IQ_CMD (free) mailbox completion.
 */
static int
csio_wr_iq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
{
	enum fw_retval retval = csio_mb_fw_retval(mbp);
	int rv = 0;

	if (retval != FW_SUCCESS)
		rv = -EINVAL;

	mempool_free(mbp, hw->mb_mempool);

	return rv;
}
/*
 * csio_wr_iq_destroy - Free an ingress queue.
 * @hw: The HW module.
 * @priv: Private data object.
 * @iq_idx: Ingress queue index to destroy.
 * @cbfn: Completion callback.
 *
 * This API frees an ingress queue by issuing the FW_IQ_CMD
 * with the free bit set.
 */
static int
csio_wr_iq_destroy(struct csio_hw *hw, void *priv, int iq_idx,
		   void (*cbfn)(struct csio_hw *, struct csio_mb *))
{
	int rv = 0;
	struct csio_mb *mbp;
	struct csio_iq_params iqp;
	int flq_idx;

	memset(&iqp, 0, sizeof(struct csio_iq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp)
		return -ENOMEM;

	iqp.pfn		= hw->pfn;
	iqp.vfn		= 0;
	iqp.iqid	= csio_q_iqid(hw, iq_idx);
	iqp.type	= FW_IQ_TYPE_FL_INT_CAP;

	flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
	if (flq_idx != -1)
		iqp.fl0id = csio_q_flid(hw, flq_idx);
	else
		iqp.fl0id = 0xFFFF;

	iqp.fl1id = 0xFFFF;

	csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);

	rv = csio_mb_issue(hw, mbp);
	if (rv != 0) {
		mempool_free(mbp, hw->mb_mempool);
		return rv;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_iq_destroy_rsp(hw, mbp, iq_idx);
}
/*
 * csio_wr_eq_destroy_rsp - Response handler for OFLD EQ removal.
 * @hw: The HW module.
 * @mbp: Mailbox.
 * @eq_idx: Egress queue that was freed.
 *
 * Handle FW_EQ_OFLD_CMD (free) mailbox completion.
 */
static int
csio_wr_eq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
{
	enum fw_retval retval = csio_mb_fw_retval(mbp);
	int rv = 0;

	if (retval != FW_SUCCESS)
		rv = -EINVAL;

	mempool_free(mbp, hw->mb_mempool);

	return rv;
}
/*
 * csio_wr_eq_destroy - Free an Egress queue.
 * @hw: The HW module.
 * @priv: Private data object.
 * @eq_idx: Egress queue index to destroy.
 * @cbfn: Completion callback.
 *
 * This API frees an Egress queue by issuing the FW_EQ_OFLD_CMD
 * with the free bit set.
 */
static int
csio_wr_eq_destroy(struct csio_hw *hw, void *priv, int eq_idx,
		   void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	int rv = 0;
	struct csio_mb *mbp;
	struct csio_eq_params eqp;

	memset(&eqp, 0, sizeof(struct csio_eq_params));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp)
		return -ENOMEM;

	eqp.pfn		= hw->pfn;
	eqp.vfn		= 0;
	eqp.eqid	= csio_q_eqid(hw, eq_idx);

	csio_mb_eq_ofld_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &eqp, cbfn);

	rv = csio_mb_issue(hw, mbp);
	if (rv != 0) {
		mempool_free(mbp, hw->mb_mempool);
		return rv;
	}

	if (cbfn != NULL)
		return 0;

	return csio_wr_eq_destroy_rsp(hw, mbp, eq_idx);
}
/*
 * csio_wr_cleanup_eq_stpg - Cleanup Egress queue status page
 * @hw: HW module.
 * @qidx: Egress queue index
 *
 * Cleanup the Egress queue status page.
 */
static void
csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx)
{
	struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];
	struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;

	memset(stp, 0, sizeof(*stp));
}
/*
 * csio_wr_cleanup_iq_ftr - Cleanup footer entries in IQ
 * @hw: HW module.
 * @qidx: Ingress queue index
 *
 * Clean up the footer entries in the given ingress queue and set the
 * internal copy of the genbit to 1.
 */
static void
csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx)
{
	struct csio_wrm *wrm	= csio_hw_to_wrm(hw);
	struct csio_q *q	= wrm->q_arr[qidx];
	void *wr;
	struct csio_iqwr_footer *ftr;
	uint32_t i = 0;

	/* Set to 1 since we are just about to zero out the genbit */
	q->un.iq.genbit = 1;

	for (i = 0; i < q->credits; i++) {
		/* Get the WR */
		wr = (void *)((uintptr_t)q->vstart +
					   (i * q->wr_sz));
		/* Get the footer */
		ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
					  (q->wr_sz - sizeof(*ftr)));
		/* Zero out footer */
		memset(ftr, 0, sizeof(*ftr));
	}
}
int
csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
{
	int i, flq_idx;
	struct csio_q *q;
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	int rv = 0;

	for (i = 0; i < wrm->free_qidx; i++) {
		q = wrm->q_arr[i];

		switch (q->type) {
		case CSIO_EGRESS:
			if (csio_q_eqid(hw, i) != CSIO_MAX_QID) {
				csio_wr_cleanup_eq_stpg(hw, i);
				if (!cmd) {
					csio_q_eqid(hw, i) = CSIO_MAX_QID;
					continue;
				}

				rv = csio_wr_eq_destroy(hw, NULL, i, NULL);
				if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
					cmd = false;

				csio_q_eqid(hw, i) = CSIO_MAX_QID;
			}
			break;
		case CSIO_INGRESS:
			if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
				csio_wr_cleanup_iq_ftr(hw, i);
				if (!cmd) {
					csio_q_iqid(hw, i) = CSIO_MAX_QID;
					flq_idx = csio_q_iq_flq_idx(hw, i);
					if (flq_idx != -1)
						csio_q_flid(hw, flq_idx) =
								CSIO_MAX_QID;
					continue;
				}

				rv = csio_wr_iq_destroy(hw, NULL, i, NULL);
				if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
					cmd = false;

				csio_q_iqid(hw, i) = CSIO_MAX_QID;
				flq_idx = csio_q_iq_flq_idx(hw, i);
				if (flq_idx != -1)
					csio_q_flid(hw, flq_idx) =
								CSIO_MAX_QID;
			}
			break;
		default:
			break;
		}
	}

	hw->flags &= ~CSIO_HWF_Q_FW_ALLOCED;

	return rv;
}
/*
 * csio_wr_get - Get requested size of WR entry/entries from queue.
 * @hw: HW module.
 * @qidx: Index of queue.
 * @size: Cumulative size of Work request(s).
 * @wrp: Work request pair.
 *
 * If requested credits are available, return the start address of the
 * work request in the work request pair. Set pidx accordingly and
 * return.
 *
 * NOTE about WR pair:
 * ==================
 * A WR can start towards the end of a queue, and then continue at the
 * beginning, since the queue is considered to be circular. This will
 * require a pair of address/size to be passed back to the caller -
 * hence the Work request pair format.
 */
int
csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size,
	    struct csio_wr_pair *wrp)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *q = wrm->q_arr[qidx];
	void *cwr = (void *)((uintptr_t)(q->vstart) +
					    (q->pidx * CSIO_QCREDIT_SZ));
	struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
	uint16_t cidx = q->cidx = ntohs(stp->cidx);
	uint16_t pidx = q->pidx;
	uint32_t req_sz	= ALIGN(size, CSIO_QCREDIT_SZ);
	int req_credits	= req_sz / CSIO_QCREDIT_SZ;
	int credits;

	CSIO_DB_ASSERT(q->owner != NULL);
	CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
	CSIO_DB_ASSERT(cidx <= q->credits);
	/* Calculate credits */
	if (pidx > cidx) {
		credits = q->credits - (pidx - cidx) - 1;
	} else if (cidx > pidx) {
		credits = cidx - pidx - 1;
	} else {
		/* cidx == pidx, empty queue */
		credits = q->credits;
		CSIO_INC_STATS(q, n_qempty);
	}

	/*
	 * Check if we have enough credits.
	 * credits = 1 implies queue is full.
	 */
	if (!credits || (req_credits > credits)) {
		CSIO_INC_STATS(q, n_qfull);
		return -EBUSY;
	}
	/*
	 * If we are here, we have enough credits to satisfy the
	 * request. Check if we are near the end of q, and if WR spills over.
	 * If it does, use the first addr/size to cover the queue until
	 * the end. Fit the remainder portion of the request at the top
	 * of queue and return it in the second addr/len. Set pidx
	 * accordingly.
	 */
	if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) {
		wrp->addr1 = cwr;
		wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr);
		wrp->addr2 = q->vstart;
		wrp->size2 = req_sz - wrp->size1;
		q->pidx	= (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) /
							CSIO_QCREDIT_SZ);
		CSIO_INC_STATS(q, n_qwrap);
		CSIO_INC_STATS(q, n_eq_wr_split);
	} else {
		wrp->addr1 = cwr;
		wrp->size1 = req_sz;
		wrp->addr2 = NULL;
		wrp->size2 = 0;
		q->pidx	+= (uint16_t)req_credits;

		/* We are at the end of the queue, roll back pidx to the top */
		if (unlikely(q->pidx == q->credits)) {
			q->pidx = 0;
			CSIO_INC_STATS(q, n_qwrap);
		}
	}

	q->inc_idx = (uint16_t)req_credits;

	CSIO_INC_STATS(q, n_tot_reqs);

	return 0;
}
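/*
 * Worked example (illustrative): on a queue with 256 credits and
 * pidx == 254, a request for 3 credits (req_sz == 192 bytes) spills
 * past vwrap. The caller then gets size1 == 2 credits' worth (128
 * bytes) at the tail of the queue, size2 == 64 bytes at vstart, and
 * pidx becomes 1.
 */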
/*
 * csio_wr_copy_to_wrp - Copies given data into WR.
 * @data_buf: Data buffer
 * @wrp: Work request pair.
 * @wr_off: Work request offset.
 * @data_len: Data length.
 *
 * Copies the given data into the work request. The work request pair
 * (wrp) specifies the address information of the work request.
 */
void
csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp,
		    uint32_t wr_off, uint32_t data_len)
{
	uint32_t nbytes;

	/* Amount of space available in buffer addr1 of WRP */
	nbytes = ((wrp->size1 - wr_off) >= data_len) ?
					data_len : (wrp->size1 - wr_off);

	memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes);
	data_len -= nbytes;

	/* Write the remaining data from the beginning of circular buffer */
	if (data_len) {
		CSIO_DB_ASSERT(data_len <= wrp->size2);
		CSIO_DB_ASSERT(wrp->addr2 != NULL);
		memcpy(wrp->addr2, (uint8_t *) data_buf + nbytes, data_len);
	}
}
/*
 * csio_wr_issue - Notify chip of Work request.
 * @hw: HW module.
 * @qidx: Index of queue.
 * @prio: 0: Low priority, 1: High priority
 *
 * Rings the SGE Doorbell by writing the current producer index of the passed
 * in queue into the register.
 */
int
csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_q *q = wrm->q_arr[qidx];

	CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));

	wmb();
	/* Ring SGE Doorbell writing q->pidx into it */
	csio_wr_reg32(hw, DBPRIO_V(prio) | QID_V(q->un.eq.physeqid) |
			  PIDX_T5_V(q->inc_idx) | DBTYPE_F,
			  MYPF_REG(SGE_PF_KDOORBELL_A));
	q->inc_idx = 0;

	return 0;
}
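/*
 * Usage sketch (hypothetical caller, not part of this file): reserve
 * space, copy a request across the possible wrap, then ring the
 * doorbell. 'req' and 'req_len' are assumed caller-side values.
 *
 *	struct csio_wr_pair wrp;
 *
 *	if (csio_wr_get(hw, qidx, req_len, &wrp) == 0) {
 *		csio_wr_copy_to_wrp(req, &wrp, 0, req_len);
 *		csio_wr_issue(hw, qidx, false);
 *	}
 */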
static inline uint32_t
csio_wr_avail_qcredits(struct csio_q *q)
{
	if (q->pidx > q->cidx)
		return q->pidx - q->cidx;
	else if (q->cidx > q->pidx)
		return q->credits - (q->cidx - q->pidx);
	else
		return 0;	/* cidx == pidx, empty queue */
}
/*
 * csio_wr_inval_flq_buf - Invalidate a free list buffer entry.
 * @hw: HW module.
 * @flq: The freelist queue.
 *
 * Invalidate the driver's version of a freelist buffer entry,
 * without freeing the associated DMA memory. The entry
 * to be invalidated is picked up from the current Free list
 * buffer cidx.
 */
static inline void
csio_wr_inval_flq_buf(struct csio_hw *hw, struct csio_q *flq)
{
	flq->cidx++;
	if (flq->cidx == flq->credits) {
		flq->cidx = 0;
		CSIO_INC_STATS(flq, n_qwrap);
	}
}
/*
 * csio_wr_process_fl - Process a freelist completion.
 * @hw: HW module.
 * @q: The ingress queue attached to the Freelist.
 * @wr: The freelist completion WR in the ingress queue.
 * @len_to_qid: The lower 32-bits of the first flit of the RSP footer
 * @iq_handler: Caller's handler for this completion.
 * @priv: Private pointer of caller
 */
static inline void
csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
		   void *wr, uint32_t len_to_qid,
		   void (*iq_handler)(struct csio_hw *, void *,
				      uint32_t, struct csio_fl_dma_buf *,
				      void *),
		   void *priv)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	struct csio_fl_dma_buf flb;
	struct csio_dma_buf *buf, *fbuf;
	uint32_t bufsz, len, lastlen = 0;
	int n;
	struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx];

	CSIO_DB_ASSERT(flq != NULL);
	len = len_to_qid;
	if (len & IQWRF_NEWBUF) {
		if (flq->un.fl.offset > 0) {
			csio_wr_inval_flq_buf(hw, flq);
			flq->un.fl.offset = 0;
		}
		len = IQWRF_LEN_GET(len);
	}

	CSIO_DB_ASSERT(len != 0);
	flb.totlen = len;

	/* Consume all freelist buffers used for len bytes */
	for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) {
		buf = &flq->un.fl.bufs[flq->cidx];
		bufsz = csio_wr_fl_bufsz(sge, buf);

		fbuf->paddr	= buf->paddr;
		fbuf->vaddr	= buf->vaddr;

		flb.offset	= flq->un.fl.offset;
		lastlen		= min(bufsz, len);
		fbuf->len	= lastlen;

		len -= lastlen;
		if (!len)
			break;

		/* Consume the current buffer */
		csio_wr_inval_flq_buf(hw, flq);
	}

	flb.defer_free = flq->un.fl.packen ? 0 : 1;

	iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer),
		   &flb, priv);

	if (flq->un.fl.packen)
		flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align);
	else
		csio_wr_inval_flq_buf(hw, flq);
}
/*
 * csio_is_new_iqwr - Is this a new Ingress queue entry?
 * @q: Ingress queue.
 * @ftr: Ingress queue WR SGE footer.
 *
 * The entry is new if our generation bit matches the corresponding
 * bit in the footer of the current WR.
 */
static inline bool
csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
{
	return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT));
}
/*
 * csio_wr_process_iq - Process elements in Ingress queue.
 * @hw: HW module.
 * @q: Ingress queue.
 * @iq_handler: Handler for this queue
 * @priv: Caller's private pointer
 *
 * This routine walks through every entry of the ingress queue, calling
 * the provided iq_handler with the entry, until the generation bit
 * flips.
 */
int
csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
		   void (*iq_handler)(struct csio_hw *, void *,
				      uint32_t, struct csio_fl_dma_buf *,
				      void *),
		   void *priv)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz));
	struct csio_iqwr_footer *ftr;
	uint32_t wr_type, fw_qid, qid;
	struct csio_q *q_completed;
	struct csio_q *flq = csio_iq_has_fl(q) ?
					wrm->q_arr[q->un.iq.flq_idx] : NULL;
	int rv = 0;
	/* Get the footer */
	ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
					  (q->wr_sz - sizeof(*ftr)));

	/*
	 * When q wrapped around last time, driver should have inverted
	 * ic.genbit as well.
	 */
	while (csio_is_new_iqwr(q, ftr)) {

		CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <=
						(uintptr_t)q->vwrap);
		rmb();
		wr_type = IQWRF_TYPE_GET(ftr->u.type_gen);
		switch (wr_type) {
		case X_RSPD_TYPE_CPL:
			/* Subtract footer from WR len */
			iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv);
			break;
		case X_RSPD_TYPE_FLBUF:
			csio_wr_process_fl(hw, q, wr,
					   ntohl(ftr->pldbuflen_qid),
					   iq_handler, priv);
			break;
		case X_RSPD_TYPE_INTR:
			fw_qid = ntohl(ftr->pldbuflen_qid);
			qid = fw_qid - wrm->fw_iq_start;
			q_completed = hw->wrm.intr_map[qid];

			if (unlikely(qid ==
					csio_q_physiqid(hw, hw->intr_iq_idx))) {
				/*
				 * We are already in the Forward Interrupt
				 * Queue Service! Do not service
				 * again!
				 */
			} else {
				CSIO_DB_ASSERT(q_completed);
				CSIO_DB_ASSERT(
					q_completed->un.iq.iq_intx_handler);

				/* Call the queue handler. */
				q_completed->un.iq.iq_intx_handler(hw, NULL,
						0, NULL, (void *)q_completed);
			}
			break;
		default:
			csio_warn(hw, "Unknown resp type 0x%x received\n",
				 wr_type);
			CSIO_INC_STATS(q, n_rsp_unknown);
			break;
		}
		/*
		 * Ingress *always* has fixed size WR entries. Therefore,
		 * there should always be complete WRs towards the end of
		 * the queue.
		 */
		if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) {
			/* Roll over to start of queue */
			q->cidx = 0;
			wr	= q->vstart;

			/* Toggle genbit */
			q->un.iq.genbit ^= 0x1;

			CSIO_INC_STATS(q, n_qwrap);
		} else {
			q->cidx++;
			wr	= (void *)((uintptr_t)(q->vstart) +
					   (q->cidx * q->wr_sz));
		}

		ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
						  (q->wr_sz - sizeof(*ftr)));
		q->inc_idx++;

	} /* while (q->un.iq.genbit == hdr->genbit) */
	/*
	 * We need to re-arm SGE interrupts in case we got a stray interrupt,
	 * especially in msix mode. With INTx, this may be a common occurrence.
	 */
	if (unlikely(!q->inc_idx)) {
		CSIO_INC_STATS(q, n_stray_comp);
		rv = -EINVAL;
		goto restart;
	}

	/* Replenish free list buffers if pending falls below low water mark */
	if (flq) {
		uint32_t avail  = csio_wr_avail_qcredits(flq);

		if (avail <= 16) {
			/*
			 * Make sure that in a FLQ, at least 1 credit (8 FL
			 * buffers) remains unpopulated, otherwise HW thinks
			 * the FLQ is empty.
			 */
			csio_wr_update_fl(hw, flq, (flq->credits - 8) - avail);
			csio_wr_ring_fldb(hw, flq);
		}
	}

restart:
	/* Now inform SGE about our incremental index value */
	csio_wr_reg32(hw, CIDXINC_V(q->inc_idx)		|
			  INGRESSQID_V(q->un.iq.physiqid)	|
			  TIMERREG_V(csio_sge_timer_reg),
			  MYPF_REG(SGE_PF_GTS_A));
	q->stats.n_tot_rsps += q->inc_idx;

	q->inc_idx = 0;

	return rv;
}
int
csio_wr_process_iq_idx(struct csio_hw *hw, int qidx,
		       void (*iq_handler)(struct csio_hw *, void *,
					  uint32_t, struct csio_fl_dma_buf *,
					  void *),
		       void *priv)
{
	struct csio_wrm *wrm	= csio_hw_to_wrm(hw);
	struct csio_q	*iq	= wrm->q_arr[qidx];

	return csio_wr_process_iq(hw, iq, iq_handler, priv);
}
/* Return the index of the timer value closest to @time */
static int
csio_closest_timer(struct csio_sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

/* Return the index of the interrupt count threshold closest to @cnt */
static int
csio_closest_thresh(struct csio_sge *s, int cnt)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = cnt - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
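/*
 * Worked example (illustrative): with timer values of, say,
 * {1, 5, 10, 50, 100, 200} microseconds, a requested coalesce time of
 * 8 gives absolute deltas {7, 3, 2, 42, 92, 192}, so
 * csio_closest_timer() returns index 2 (the 10us timer).
 */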
/* Fix up host-dependent SGE registers (page size, FL alignment etc.) */
static void
csio_wr_fixup_host_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	uint32_t clsz = L1_CACHE_BYTES;
	uint32_t s_hps = PAGE_SHIFT - 10;
	uint32_t stat_len = clsz > 64 ? 128 : 64;
	u32 fl_align = clsz < 32 ? 32 : clsz;
	u32 pack_align;
	u32 ingpad, ingpack;
	int pcie_cap;

	csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
		      HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
		      HOSTPAGESIZEPF4_V(s_hps) | HOSTPAGESIZEPF5_V(s_hps) |
		      HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps),
		      SGE_HOST_PAGE_SIZE_A);
	/* T5 introduced the separation of the Free List Padding and
	 * Packing Boundaries. Thus, we can select a smaller Padding
	 * Boundary to avoid uselessly chewing up PCIe Link and Memory
	 * Bandwidth, and use a Packing Boundary which is large enough
	 * to avoid false sharing between CPUs, etc.
	 *
	 * For the PCI Link, the smaller the Padding Boundary the
	 * better. For the Memory Controller, a smaller Padding
	 * Boundary is better until we cross under the Memory Line
	 * Size (the minimum unit of transfer to/from Memory). If we
	 * have a Padding Boundary which is smaller than the Memory
	 * Line Size, that'll involve a Read-Modify-Write cycle on the
	 * Memory Controller which is never good.
	 */

	/* We want the Packing Boundary to be based on the Cache Line
	 * Size in order to help avoid False Sharing performance
	 * issues between CPUs, etc. We also want the Packing
	 * Boundary to incorporate the PCI-E Maximum Payload Size. We
	 * get best performance when the Packing Boundary is a
	 * multiple of the Maximum Payload Size.
	 */
	pack_align = fl_align;
	pcie_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		u32 mps, mps_log;
		u16 devctl;

		/* The PCIe Device Control Maximum Payload Size field
		 * [bits 7:5] encodes sizes as powers of 2 starting at
		 * 128 bytes.
		 */
		pci_read_config_word(hw->pdev,
				     pcie_cap + PCI_EXP_DEVCTL,
				     &devctl);
		mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
		mps = 1 << mps_log;
		if (mps > pack_align)
			pack_align = mps;
	}
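	/*
	 * Illustrative decode of the block above: a Device Control value
	 * with bits [7:5] == 010b yields mps_log = 2 + 7 = 9, i.e. a
	 * 512-byte Maximum Payload Size, so pack_align is raised to 512
	 * on such a system.
	 */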
	/* T5/T6 have a special interpretation of the "0"
	 * value for the Packing Boundary. This corresponds to 16
	 * bytes instead of the expected 32 bytes.
	 */
	if (pack_align <= 16) {
		ingpack = INGPACKBOUNDARY_16B_X;
		fl_align = 16;
	} else if (pack_align == 32) {
		ingpack = INGPACKBOUNDARY_64B_X;
		fl_align = 64;
	} else {
		u32 pack_align_log = fls(pack_align) - 1;

		ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
		fl_align = pack_align;
	}
	/* Use the smallest Ingress Padding which isn't smaller than
	 * the Memory Controller Read/Write Size. We'll take that as
	 * being 8 bytes since we don't know of any system with a
	 * wider Memory Controller Bus Width.
	 */
	if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
		ingpad = INGPADBOUNDARY_32B_X;
	else
		ingpad = T6_INGPADBOUNDARY_8B_X;

	csio_set_reg_field(hw, SGE_CONTROL_A,
			   INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
			   EGRSTATUSPAGESIZE_F,
			   INGPADBOUNDARY_V(ingpad) |
			   EGRSTATUSPAGESIZE_V(stat_len != 64));
	csio_set_reg_field(hw, SGE_CONTROL2_A,
			   INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
			   INGPACKBOUNDARY_V(ingpack));
	/* FL BUFFER SIZE#0 is Page size, i.e. already aligned to cache line */
	csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A);

	/*
	 * If using hard params, the following will get set correctly
	 * in csio_wr_set_sge().
	 */
	if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
		csio_wr_reg32(hw,
			      (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) +
			      fl_align - 1) & ~(fl_align - 1),
			      SGE_FL_BUFFER_SIZE2_A);
		csio_wr_reg32(hw,
			      (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) +
			      fl_align - 1) & ~(fl_align - 1),
			      SGE_FL_BUFFER_SIZE3_A);
	}

	sge->csio_fl_align = fl_align;

	csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A);

	/* default value of rx_dma_offset of the NIC driver */
	csio_set_reg_field(hw, SGE_CONTROL_A,
			   PKTSHIFT_V(PKTSHIFT_M),
			   PKTSHIFT_V(CSIO_SGE_RX_DMA_OFFSET));

	csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG_A,
				    CSUM_HAS_PSEUDO_HDR_F, 0);
}
static void
csio_init_intr_coalesce_parms(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;

	csio_sge_thresh_reg = csio_closest_thresh(sge, csio_intr_coalesce_cnt);
	if (csio_intr_coalesce_cnt) {
		csio_sge_thresh_reg = 0;
		csio_sge_timer_reg = X_TIMERREG_RESTART_COUNTER;
		return;
	}

	csio_sge_timer_reg = csio_closest_timer(sge, csio_intr_coalesce_time);
}
/*
 * csio_wr_get_sge - Get SGE register values.
 * @hw: HW module.
 *
 * Used by non-master functions and by master-functions relying on config file.
 */
static void
csio_wr_get_sge(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	uint32_t ingpad;
	int i;
	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
	u32 ingress_rx_threshold;

	sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);
	ingpad = INGPADBOUNDARY_G(sge->sge_control);

	switch (ingpad) {
	case X_INGPCIEBOUNDARY_32B:
		sge->csio_fl_align = 32; break;
	case X_INGPCIEBOUNDARY_64B:
		sge->csio_fl_align = 64; break;
	case X_INGPCIEBOUNDARY_128B:
		sge->csio_fl_align = 128; break;
	case X_INGPCIEBOUNDARY_256B:
		sge->csio_fl_align = 256; break;
	case X_INGPCIEBOUNDARY_512B:
		sge->csio_fl_align = 512; break;
	case X_INGPCIEBOUNDARY_1024B:
		sge->csio_fl_align = 1024; break;
	case X_INGPCIEBOUNDARY_2048B:
		sge->csio_fl_align = 2048; break;
	case X_INGPCIEBOUNDARY_4096B:
		sge->csio_fl_align = 4096; break;
	}
	for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
		csio_get_flbuf_size(hw, sge, i);

	timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1_A);
	timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3_A);
	timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5_A);

	sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE0_G(timer_value_0_and_1));
	sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE1_G(timer_value_0_and_1));
	sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE2_G(timer_value_2_and_3));
	sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE3_G(timer_value_2_and_3));
	sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE4_G(timer_value_4_and_5));
	sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
					TIMERVALUE5_G(timer_value_4_and_5));

	ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD_A);
	sge->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
	sge->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
	sge->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
	sge->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);

	csio_init_intr_coalesce_parms(hw);
}
/*
 * csio_wr_set_sge - Initialize SGE registers
 * @hw: HW module.
 *
 * Used by Master function to initialize SGE registers in the absence
 * of a config file.
 */
static void
csio_wr_set_sge(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_sge *sge = &wrm->sge;
	uint32_t i;

	/*
	 * Set up our basic SGE mode to deliver CPL messages to our Ingress
	 * Queue and Packet Data to the Free List.
	 */
	csio_set_reg_field(hw, SGE_CONTROL_A, RXPKTCPLMODE_F, RXPKTCPLMODE_F);

	sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL_A);
	/* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */

	/*
	 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
	 * and generate an interrupt when this occurs so we can recover.
	 */
	csio_set_reg_field(hw, SGE_DBFIFO_STATUS_A,
			   LP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M),
			   LP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH));
	csio_set_reg_field(hw, SGE_DBFIFO_STATUS2_A,
			   HP_INT_THRESH_T5_V(LP_INT_THRESH_T5_M),
			   HP_INT_THRESH_T5_V(CSIO_SGE_DBFIFO_INT_THRESH));

	csio_set_reg_field(hw, SGE_DOORBELL_CONTROL_A, ENABLE_DROP_F,
			   ENABLE_DROP_F);

	/* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */

	CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
	csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1)
		      & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2_A);
	csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1)
		      & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3_A);
	CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
	CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
	CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
	CSIO_SET_FLBUF_SIZE(hw, 7, CSIO_SGE_FLBUF_SIZE7);
	CSIO_SET_FLBUF_SIZE(hw, 8, CSIO_SGE_FLBUF_SIZE8);
	for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
		csio_get_flbuf_size(hw, sge, i);

	/* Initialize interrupt coalescing attributes */
	sge->timer_val[0] = CSIO_SGE_TIMER_VAL_0;
	sge->timer_val[1] = CSIO_SGE_TIMER_VAL_1;
	sge->timer_val[2] = CSIO_SGE_TIMER_VAL_2;
	sge->timer_val[3] = CSIO_SGE_TIMER_VAL_3;
	sge->timer_val[4] = CSIO_SGE_TIMER_VAL_4;
	sge->timer_val[5] = CSIO_SGE_TIMER_VAL_5;

	sge->counter_val[0] = CSIO_SGE_INT_CNT_VAL_0;
	sge->counter_val[1] = CSIO_SGE_INT_CNT_VAL_1;
	sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
	sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;

	csio_wr_reg32(hw, THRESHOLD_0_V(sge->counter_val[0]) |
		      THRESHOLD_1_V(sge->counter_val[1]) |
		      THRESHOLD_2_V(sge->counter_val[2]) |
		      THRESHOLD_3_V(sge->counter_val[3]),
		      SGE_INGRESS_RX_THRESHOLD_A);

	csio_wr_reg32(hw,
		   TIMERVALUE0_V(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
		   TIMERVALUE1_V(csio_us_to_core_ticks(hw, sge->timer_val[1])),
		   SGE_TIMER_VALUE_0_AND_1_A);

	csio_wr_reg32(hw,
		   TIMERVALUE2_V(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
		   TIMERVALUE3_V(csio_us_to_core_ticks(hw, sge->timer_val[3])),
		   SGE_TIMER_VALUE_2_AND_3_A);

	csio_wr_reg32(hw,
		   TIMERVALUE4_V(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
		   TIMERVALUE5_V(csio_us_to_core_ticks(hw, sge->timer_val[5])),
		   SGE_TIMER_VALUE_4_AND_5_A);

	csio_init_intr_coalesce_parms(hw);
}
void
csio_wr_sge_init(struct csio_hw *hw)
{
	/*
	 * If we are master and chip is not initialized:
	 *    - If we plan to use the config file, we need to fix up some
	 *      host specific registers, and read the rest of the SGE
	 *      configuration.
	 *    - If we don't plan to use the config file, we need to initialize
	 *      SGE entirely, including fixing the host specific registers.
	 * If we are master and chip is initialized, just read and work off of
	 * the already initialized SGE values.
	 * If we aren't the master, we are only allowed to read and work off of
	 * the already initialized SGE values.
	 *
	 * Therefore, before calling this function, we assume that the master-
	 * ship of the card, state and whether to use config file or not, have
	 * already been decided.
	 */
	if (csio_is_hw_master(hw)) {
		if (hw->fw_state != CSIO_DEV_STATE_INIT)
			csio_wr_fixup_host_params(hw);

		if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
			csio_wr_get_sge(hw);
		else
			csio_wr_set_sge(hw);
	} else
		csio_wr_get_sge(hw);
}
/*
 * csio_wrm_init - Initialize Work request module.
 * @wrm: WR module
 * @hw: HW pointer
 *
 * Allocates memory for an array of queue pointers starting at q_arr.
 */
int
csio_wrm_init(struct csio_wrm *wrm, struct csio_hw *hw)
{
	int i;

	if (!wrm->num_q) {
		csio_err(hw, "Num queues is not set\n");
		return -EINVAL;
	}

	wrm->q_arr = kcalloc(wrm->num_q, sizeof(struct csio_q *), GFP_KERNEL);
	if (!wrm->q_arr)
		goto err;

	for (i = 0; i < wrm->num_q; i++) {
		wrm->q_arr[i] = kzalloc(sizeof(struct csio_q), GFP_KERNEL);
		if (!wrm->q_arr[i]) {
			while (--i >= 0)
				kfree(wrm->q_arr[i]);
			kfree(wrm->q_arr);
			goto err;
		}
	}
	wrm->free_qidx	= 0;

	return 0;

err:
	return -ENOMEM;
}
/*
 * csio_wrm_exit - Uninitialize Work request module.
 * @wrm: WR module
 * @hw: HW module
 *
 * Uninitialize WR module. Free q_arr and pointers in it.
 * We have the additional job of freeing the DMA memory associated
 * with the queues.
 */
void
csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
{
	int i;
	uint32_t j;
	struct csio_q *q;
	struct csio_dma_buf *buf;

	for (i = 0; i < wrm->num_q; i++) {
		q = wrm->q_arr[i];

		if (wrm->free_qidx && (i < wrm->free_qidx)) {
			if (q->type == CSIO_FREELIST) {
				if (!q->un.fl.bufs)
					continue;
				for (j = 0; j < q->credits; j++) {
					buf = &q->un.fl.bufs[j];
					if (!buf->vaddr)
						continue;
					dma_free_coherent(&hw->pdev->dev,
							buf->len, buf->vaddr,
							buf->paddr);
				}
				kfree(q->un.fl.bufs);
			}
			dma_free_coherent(&hw->pdev->dev, q->size,
					q->vstart, q->pstart);
		}
		kfree(q);
	}

	hw->flags &= ~CSIO_HWF_Q_MEM_ALLOCED;

	kfree(wrm->q_arr);
}