/* Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */
#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "qplib_res.h"
#include "qplib_rcfw.h"
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}
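/* The three flags above drive the phantom-WQE workaround in the CQ
 * polling path (see do_wa9060() below); clearing them abandons any
 * in-progress phantom-completion search for this QP.
 */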
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->rq.flushed) {
		dev_dbg(&rcq->hwq.pdev->dev,
			"FP: Adding to RQ Flush list = %p\n", qp);
		list_add_tail(&qp->rq_flush, &rcq->rqf_head);
		qp->rq.flushed = true;
	}
}
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}
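/* Lock ordering: the SCQ flush_lock is always taken first (with IRQs
 * disabled), then the RCQ flush_lock. When a QP uses the same CQ for
 * send and receive, only one real lock is taken; the __acquire()/
 * __release() calls keep sparse's context tracking balanced for that
 * case.
 */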
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}
static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (qp->rq.flushed) {
		qp->rq.flushed = false;
		list_del(&qp->rq_flush);
	}
}
void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}
static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}
static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					rq->max_wqe * qp->rq_hdr_buf_size,
					&qp->rq_hdr_buf_map, GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						<< 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	spin_unlock_bh(&hwq->lock);
}
/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u32 hw_polled = 0;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						<< 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			cq->toggle = (le16_to_cpu(nqe->info10_type) &
					NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
			cq->dbinfo.toggle = cq->toggle;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (nq->cqn_handler(nq, (cq)))
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
						<< 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (nq->srqn_handler(nq,
					     (struct bnxt_qplib_srq *)q_handle,
					     nqsrqe->event))
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	spin_unlock_bh(&hwq->lock);
}
/* bnxt_re_synchronize_nq - self polling notification queue.
 * @nq - notification queue pointer
 *
 * This function will start polling entries of a given notification queue
 * for all pending entries.
 * This function is useful to synchronize notification entries while resources
 * are going away.
 */
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
	int budget = nq->budget;

	nq->budget = nq->hwq.max_elements;
	bnxt_qplib_service_nq(&nq->nq_tasklet);
	nq->budget = budget;
}
static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}
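/* The hard IRQ handler only prefetches the next NQE and kicks the
 * tasklet; all NQE decoding happens in bnxt_qplib_service_nq() in
 * softirq context under hwq->lock.
 */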
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	nq->requested = false;
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;

	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	tasklet_disable(&nq->nq_tasklet);
}
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc)
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return rc;
}
static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->dbinfo.flags = 0;
	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;

	return 0;
}
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}
int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}

	return 0;
}
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_srq req = {};
	struct bnxt_qplib_pbl *pbl;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		return rc;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}
	srq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);

	return rc;
}
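/* The SRQ software queue entries form a singly linked free list:
 * start_idx is the head, last_idx the tail, and next_idx == -1 marks
 * the end. bnxt_qplib_post_srq_recv() pops from the head and stores
 * the popped index in the hardware WQE's wr_id so the completion path
 * can later identify and recycle the entry.
 */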
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 count;

	count = __bnxt_qplib_get_avail(srq_hwq);
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_srq_resp_sb *sb;
	struct cmdq_query_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_SRQ,
				 sizeof(req));

	/* Configure the request */
	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf.sb;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	srq->threshold = le16_to_cpu(sb->srq_limit);
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);

	return rc;
}
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 count = 0;
	int i, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		spin_unlock(&srq_hwq->lock);
		return -EINVAL;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);

	spin_lock(&srq_hwq->lock);
	count = __bnxt_qplib_get_avail(srq_hwq);
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req == true && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}

	return 0;
}
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int indx;

	que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq)
		return -ENOMEM;

	que->swq_start = 0;
	que->swq_last = que->max_wqe - 1;
	for (indx = 0; indx < que->max_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;

	return 0;
}
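/* Example: with max_wqe == 4 the next_idx chain is 0->1->2->3->0, i.e.
 * a circular list. swq_start chases swq_last around this ring as WQEs
 * are posted and completed, so wr_id bookkeeping survives wrap-around.
 */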
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_qp1_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp1 req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP1,
				 sizeof(req));
	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	rq->dbinfo.flags = 0;
	hwq_attr.res = res;
	hwq_attr.sginfo = &rq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(rq);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
	if (rc)
		goto sq_swq;
	rc = bnxt_qplib_alloc_init_swq(rq);
	if (rc)
		goto fail_rq;
	req.rq_size = cpu_to_le32(rq->max_wqe);
	pbl = &rq->hwq.pbl[PBL_LVL_0];
	req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
		     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
	pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
	req.rq_pg_size_rq_lvl = pg_sz_lvl;
	req.rq_fwo_rq_sge =
		cpu_to_le16((rq->max_sge &
			     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc)
		goto fail;

	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);

	rq->dbinfo.hwq = &rq->hwq;
	rq->dbinfo.xid = qp->id;
	rq->dbinfo.db = qp->dpi->dbr;
	rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
	kfree(rq->swq);
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}
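/* The PSN/MSN search area is the aux portion of the SQ hwq and starts
 * at the first element past the WQE area (hwq->depth). If that address
 * is not page aligned, pad_pgofft records how many search-entry slots
 * would sit before it in the page. For example, with PAGE_SIZE 4096, a
 * search-entry size of 8 and the area starting 64 bytes into a page,
 * indx_pad = 64 / 8 = 8 entries.
 */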
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_create_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp req = {};
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	if (res->dattr)
		qp->dev_cap_flags = res->dattr->dev_cap_flags;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP,
				 sizeof(req));

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);

		if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
			psn_sz = sizeof(struct sq_msn_search);
			qp->msn = 0;
		}
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
	/* Update msn tbl size */
	if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
		hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
		qp->msn_tbl_sz = hwq_attr.aux_depth;
		qp->msn = 0;
	}

	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	if (psn_sz)
		bnxt_qplib_init_psn_ptr(qp, psn_sz);

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);

	rq->dbinfo.hwq = &rq->hwq;
	rq->dbinfo.xid = qp->id;
	rq->dbinfo.db = qp->dpi->dbr;
	rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}
static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		      CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		      CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}
static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification.
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}
static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_qp req = {};
	u32 temp32[4];
	u32 bmask;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_QP,
				 sizeof(req));

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_qp_resp_sb *sb;
	struct cmdq_query_qp req = {};
	u32 temp32[4];
	int i, rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	u32 peek_flags, peek_cons;
	struct cq_base *hw_cqe;
	int i;

	peek_flags = cq->dbinfo.flags;
	peek_cons = cq_hwq->cons;
	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
		if (!CQE_CMP_VALID(hw_cqe, peek_flags))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
					 1, &peek_flags);
	}
}
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_qp req = {};
	u32 tbl_indx;
	int rc;

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		return rc;
	}

	return 0;
}
void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}
/* Fill the MSN table into the next psn row */
static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_msn_search *msns;
	u32 start_psn, next_psn;
	u16 start_idx;

	msns = (struct sq_msn_search *)swq->psn_search;
	msns->start_idx_next_psn_start_psn = 0;

	start_psn = swq->start_psn;
	next_psn = swq->next_psn;
	start_idx = swq->slot_idx;
	msns->start_idx_next_psn_start_psn |=
		bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
	qp->msn++;
	qp->msn %= qp->msn_tbl_sz;
}
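/* bnxt_re_update_msn_tbl() packs the SQ slot index together with the
 * start and next PSNs into the single 64-bit
 * start_idx_next_psn_start_psn word; qp->msn is a running index into
 * the MSN table and wraps at msn_tbl_sz.
 */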
static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	/* Handle MSN differently on cap flags */
	if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
		bnxt_qplib_fill_msn_search(qp, wqe, swq);
		return;
	}
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			return -ENOMEM;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
				      (sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
}
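/* Inline data is copied directly into 16-byte WQE slots (sized like a
 * struct sq_sge): a fresh slot is pulled whenever the previous one is
 * full, and a source SGE may straddle slot boundaries, hence the two
 * min_t() clamps on the copy length.
 */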
static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx;
	u32 len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}
static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* Using sizeof(sq_send_hdr) here is a slight misnomer; the RQ header
	 * size is the same.
	 */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}
static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
				     struct bnxt_qplib_swq *swq, bool hw_retx)
{
	struct bnxt_qplib_hwq *hwq;
	u32 pg_num, pg_indx;
	void *buff;
	u32 tail;

	hwq = &sq->hwq;
	if (!hwq->pad_pg)
		return;
	tail = swq->slot_idx / sq->dbinfo.max_slot;
	if (hw_retx) {
		/* For HW retx use qp msn index */
		tail = qp->msn;
		tail %= qp->msn_tbl_sz;
	}
	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
	swq->psn_ext = buff;
	swq->psn_search = buff;
}
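/* pad_pg[] holds one entry per page of the PSN/MSN search area. A
 * worked example, assuming PAGE_SIZE 4096 and pad_stride 8: 512
 * entries fit per page, so search entry "tail" (plus the pad_pgofft
 * skew) lives in page (tail + pad_pgofft) / 512, at byte offset
 * ((tail + pad_pgofft) % 512) * 8.
 */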
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;

	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, qdf = 0;
	bool msn_update;
	void *base_hdr;
	void *ext_hdr;
	__le32 temp32;
	u32 wqe_idx;
	u32 slots;
	u16 idx;

	hwq = &sq->hwq;
	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
		dev_err(&hwq->pdev->dev,
			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
		rc = -ENOMEM;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
	bnxt_qplib_pull_psn_buff(qp, sq, swq, BNXT_RE_HW_RETX(qp->dev_cap_flags));

	idx = 0;
	swq->slot_idx = hwq->prod;
	swq->slots = slots;
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	swq->start_psn = sq->psn & BTH_PSN_MASK;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
		/* Copy the inline data */
		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
	else
		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
					       &idx);
	if (data_len < 0)
		goto queue_err;
	/* Make sure we update MSN table only for wired wqes */
	msn_update = true;
	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
			/* Assemble info for Raw Ethertype QPs */

			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_sz;
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
			   SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
			   SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		fallthrough;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
		struct sq_send_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->length = cpu_to_le32(data_len);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
						      SQ_SEND_DST_QP_MASK);
			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
						    SQ_SEND_AVID_MASK);
			msn_update = false;
		} else {
			sqe->length = cpu_to_le32(data_len);
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
		struct sq_rdma_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
		struct sq_atomic_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
		msn_update = false;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
		struct sq_fr_pmr_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
			  SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
			 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
			 SQ_FR_PMR_NUMLEVELS_MASK);

		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		ext_sqe->va = cpu_to_le64(wqe->frmr.va);
		msn_update = false;

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
		struct sq_bind_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		ext_sqe->va = cpu_to_le64(wqe->bind.va);
		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
		msn_update = false;
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	if (!BNXT_RE_HW_RETX(qp->dev_cap_flags) || msn_update) {
		swq->next_psn = sq->psn & BTH_PSN_MASK;
		bnxt_qplib_fill_psn_search(qp, wqe, swq);
	}
queue_err:
	bnxt_qplib_swq_mod_start(sq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate SQ nq_work!\n");
			rc = -ENOMEM;
		}
	}
	return rc;
}
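/* SQ WQE layout: every WQE starts with two 16-byte slots (base header
 * plus extended header, both zeroed above) followed by either inline
 * data or SGEs, all in 16-byte units. In static WQE mode the slot
 * count per WQE is fixed, while variable mode consumes only the slots
 * a WQE actually needs.
 */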
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
}
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe_hdr *base_hdr;
	struct rq_ext_hdr *ext_hdr;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, idx;
	u32 wqe_idx;
	int rc = 0;

	hwq = &rq->hwq;
	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) RQ is full!\n", qp->id);
		rc = -EINVAL;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
	swq->wr_id = wqe->wr_id;
	swq->slots = rq->dbinfo.max_slot;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s: Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	idx = 0;
	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));
	wqe_sz = (sizeof(struct rq_wqe_hdr) +
		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
	if (!wqe->num_sge) {
		struct sq_sge *sge;

		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
		sge->size = 0;
		wqe_sz++;
	}
	base_hdr->wqe_type = wqe->type;
	base_hdr->flags = wqe->flags;
	base_hdr->wqe_size = wqe_sz;
	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
queue_err:
	bnxt_qplib_swq_mod_start(rq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate RQ nq_work!\n");
			rc = -ENOMEM;
		}
	}

	return rc;
}
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 pg_sz_lvl;
	int rc;

	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"FP: CREATE_CQ failed due to NULL DPI\n");
		return -EINVAL;
	}

	cq->dbinfo.flags = 0;
	hwq_attr.res = res;
	hwq_attr.depth = cq->max_wqe;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	hwq_attr.sginfo = &cq->sg_info;
	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
	if (rc)
		return rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_CQ,
				 sizeof(req));

	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);
	req.cq_size = cpu_to_le32(cq->max_wqe);
	pbl = &cq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	cq->id = le32_to_cpu(resp.xid);
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);
	INIT_LIST_HEAD(&cq->sqf_head);
	INIT_LIST_HEAD(&cq->rqf_head);
	spin_lock_init(&cq->compl_lock);
	spin_lock_init(&cq->flush_lock);

	cq->dbinfo.hwq = &cq->hwq;
	cq->dbinfo.xid = cq->id;
	cq->dbinfo.db = cq->dpi->dbr;
	cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	cq->dbinfo.flags = 0;
	cq->dbinfo.toggle = 0;

	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);

	return 0;

fail:
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return rc;
}
void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_cq *cq)
{
	bnxt_qplib_free_hwq(res, &cq->hwq);
	memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
	/* Reset only the cons bit in the flags */
	cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
}
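/* dbinfo.flags carries epoch bits that flip each time the producer or
 * consumer index wraps; they feed the CQE/NQE valid-bit comparison.
 * After a resize only the consumer epoch is restarted, which is why
 * the single-bit clear above is used instead of zeroing all flags.
 */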
int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
			 int new_cqes)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_resize_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_resize_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 pg_sz, lvl, new_sz;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_RESIZE_CQ,
				 sizeof(req));
	hwq_attr.sginfo = &cq->sg_info;
	hwq_attr.res = res;
	hwq_attr.depth = new_cqes;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
	if (rc)
		return rc;

	req.cq_cid = cpu_to_le32(cq->id);
	pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
	pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
	lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
	       CMDQ_RESIZE_CQ_LVL_MASK;
	new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
		  CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
	req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
	req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	return rc;
}
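/* Destroy the CQ in firmware, then wait for all outstanding CNQ events
 * reported by firmware to be consumed by the NQ before freeing the
 * host ring.
 */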
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_cq req = {};
	u16 total_cnq_events;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_CQ,
				 sizeof(req));

	req.cq_cid = cpu_to_le32(cq->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
	__wait_for_all_nqes(cq, total_cnq_events);
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return 0;
}
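/* Complete every SQE still outstanding on a flushed QP with
 * FLUSHED_ERR status, skipping internal FENCE WQEs. Returns -EAGAIN
 * when the caller's CQE budget runs out before the SQ is drained.
 */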
static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int rc = 0;

	/* Now complete all outstanding SQEs with FLUSHED_ERR */
	start = sq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = sq->swq_last;
		if (start == last)
			break;
		/* Skip the FENCE WQE completions */
		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
			bnxt_qplib_cancel_phantom_processing(qp);
			goto skip_compl;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[last].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[last].type;
		cqe++;
		(*budget)--;
skip_compl:
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 sq->swq[last].slots, &sq->dbinfo.flags);
		sq->swq_last = sq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && sq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}
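/* RQ counterpart of __flush_sq(): fabricate FLUSHED_ERR receive
 * completions, with the CQE opcode chosen from the QP type (raw
 * ethernet/QP1, RC, or UD/GSI).
 */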
static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int opcode = 0;
	int rc = 0;

	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
	case CMDQ_CREATE_QP_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	/* Flush the rest of the RQ */
	start = rq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = rq->swq_last;
		if (last == start)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (unsigned long)qp;
		cqe->wr_id = rq->swq[last].wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 rq->swq[last].slots, &rq->dbinfo.flags);
		rq->swq_last = rq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!*budget && rq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}
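/* Move the QP to the error state so that no new WQEs can be posted,
 * and cancel any phantom-WQE (WA 9060) processing still in flight.
 */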
void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;
	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);
}
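/* WA 9060: if the SWQE was marked in its psn_search entry, hold off
 * completing it; arm the CQ and peek ahead for the phantom CQE of the
 * FENCE WQE before resuming normal completion processing.
 */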
/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
 *       CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1
 */
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
{
	u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	struct bnxt_qplib_swq *swq;
	struct cq_base *peek_hwcqe;
	int i, rc = 0;

	/* Check for the psn_search marking before completing */
	swq = &sq->swq[swq_last];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
			 & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_flags = cq->dbinfo.flags;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
						       peek_sw_cq_cons, NULL);
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
				/*
				 * The valid test of the entry must be done first
				 * before reading any further.
				 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				     CQ_BASE_CQE_TYPE_MASK) ==
				     CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)le64_to_cpu
						(peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx =
						((le16_to_cpu(
						  peek_req_hwcqe->sq_cons_idx)
						  - 1) % sq->max_wqe);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 * Unbreak only if the phantom
						 * comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP: Got Phantom CQE\n");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = -EINVAL;
				goto out;
			}
			bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
						 &peek_sw_cq_cons,
						 1, &peek_flags);
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}
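/* Process a REQ (send) completion. The hardware aggregates CQEs, so
 * the SWQ is walked from the current consumer up to the index carried
 * in the hwcqe, emitting one CQE per signaled WQE; an error status is
 * applied only to the final, reported WQE.
 */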
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_swq *swq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	u32 cqe_sq_cons;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: Process Req qp is NULL\n");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	/* Require to walk the sq's swq to fabricate CQEs for all previously
	 * signaled SWQEs due to CQE aggregation from the current sq cons
	 * to the cqe_sq_cons
	 */
	cqe = *pcqe;
	while (*budget) {
		if (sq->swq_last == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sq->swq_last];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status.  For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (swq->next_idx == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
				sq->swq_last, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_mark_qp_error(qp);
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		} else {
			/* Before we complete, do WA 9060 */
			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
				      cqe_sq_cons)) {
				*lib_qp = qp;
				goto out;
			}
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 swq->slots, &sq->dbinfo.flags);
		sq->swq_last = swq->next_idx;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (sq->swq_last != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}
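/* Return a received SRQE tag to the SRQ free list and advance the SRQ
 * consumer index, all under the SRQ's hwq lock.
 */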
static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
{
	spin_lock(&srq->hwq.lock);
	srq->swq[srq->last_idx].next_idx = (int)tag;
	srq->last_idx = (int)tag;
	srq->swq[srq->last_idx].next_idx = -1;
	bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
				 srq->dbinfo.max_slot, &srq->dbinfo.flags);
	spin_unlock(&srq->hwq.lock);
}
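/* Process an RC receive completion. The wr_id is resolved either from
 * the SRQ (when the SRQ flag is set) or from the QP's RQ software
 * queue; a bad hwcqe status moves the QP to the error state and queues
 * it for flushing.
 */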
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (wr_id_idx != rq->swq_last)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}
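/* UD counterpart of the RC receive handler; additionally extracts the
 * source QP number, CFA metadata and source MAC from the hwcqe.
 */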
static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	/*FIXME: Endianness fix needed for smace */
	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
				  ((le32_to_cpu(
				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
				  CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;

		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}

		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}
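/* A CQ is considered empty when the CQE at the current consumer index
 * fails the valid-bit/epoch check.
 */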
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe;
	bool rc = true;

	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
	/* Check for Valid bit. If the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
	return rc;
}
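/* Receive handler for raw-ethernet and QP1 traffic; also carries the
 * raweth_qp1_* metadata that the GSI/QP1 path needs.
 */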
static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
		& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, 6);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: SRQ used but not defined??\n");
			return -EINVAL;
		}
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}
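/* A terminal CQE moves the QP to the error state. Any remaining
 * successful SQEs up to the reported consumer index are completed with
 * status OK, then the QP is queued so its queues are flushed with
 * errors.
 */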
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 swq_last = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "FP: CQ Process Terminal Error status = 0x%x\n",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp)
		return -EINVAL;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;
	cqe_cons %= sq->max_wqe;

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* Terminal CQE can also include aggregated successful CQEs prior.
	 * So we must complete all CQEs from the current sq's cons to the
	 * cq_cons with status OK
	 */
	cqe = *pcqe;
	while (*budget) {
		swq_last = sq->swq_last;
		if (swq_last == cqe_cons)
			break;
		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[swq_last].wr_id;
			cqe->type = sq->swq[swq_last].type;
			cqe++;
			(*budget)--;
		}
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 sq->swq[swq_last].slots, &sq->dbinfo.flags);
		sq->swq_last = sq->swq[swq_last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && swq_last != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->max_wqe - 1) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
			cqe_cons, rq->max_wqe);
		rc = -EINVAL;
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
	 * from the current rq->cons to the rq->prod regardless what the
	 * rq->cons the terminal CQE indicates
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_add_flush_qp(qp);
done:
	return rc;
}
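/* A cutoff CQE indicates that firmware has switched over to the
 * resized CQ ring: clear the resize-in-progress flag and wake up the
 * waiter blocked on cq->waitq.
 */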
static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process Cutoff Error status = 0x%x\n",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}
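/* Drain the CQ's flush lists: every QP queued on the SQ/RQ flush lists
 * has its outstanding WQEs completed with FLUSHED_ERR. Returns the
 * number of CQEs written into the caller's array.
 */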
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	int budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->flush_lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->flush_lock, flags);

	return num_cqes - budget;
}
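/* Main poll loop: consume valid CQEs from the current consumer index,
 * dispatch each one to its type-specific handler, and ring the CQ
 * doorbell once for everything polled. A cutoff CQE terminates
 * processing; the return value is the number of CQEs filled.
 */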
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe;
	int budget, rc = 0;
	u32 hw_polled = 0;
	u8 type;

	budget = num_cqes;
	while (budget) {
		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		/* From the device's respective CQE format to qplib_wc*/
		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
		switch (type) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       cq->hwq.cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"process_cq unknown type 0x%lx\n",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
				dev_err(&cq->hwq.pdev->dev,
					"process_cqe error rc = 0x%x\n", rc);
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
					 1, &cq->dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
exit:
	return num_cqes - budget;
}
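/* Re-arm the CQ with the requested doorbell arm type and record the
 * armed state for the NQ handler. (The arm_type values are presumably
 * the DBC_DBC_TYPE_CQ_ARM* doorbell types used elsewhere in this file,
 * e.g. DBC_DBC_TYPE_CQ_ARMALL in do_wa9060().)
 */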
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	cq->dbinfo.toggle = cq->toggle;
	if (arm_type)
		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
	/* Using cq->arm_state variable to track whether to issue cq handler */
	atomic_set(&cq->arm_state, 1);
}
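/* Flush the CQ notification workqueues of both CQs attached to the QP,
 * avoiding a double flush when the SQ and RQ share one CQ.
 */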
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}