1 /* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/dma-mapping.h>
33 #include <linux/crc32.h>
37 #include <linux/iommu.h>
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_user_verbs.h>
41 #include <rdma/iw_cm.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_cache.h>
46 #include <linux/qed/common_hsi.h>
47 #include "qedr_hsi_rdma.h"
48 #include <linux/qed/qed_if.h>
51 #include <rdma/qedr-abi.h>
52 #include "qedr_roce_cm.h"
54 #define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm)
55 #define RDMA_MAX_SGE_PER_SRQ (4)
56 #define RDMA_MAX_SRQ_WQE_SIZE (RDMA_MAX_SGE_PER_SRQ + 1)
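/* A worst-case SRQ WQE spans one chain element per scatter/gather entry plus
 * one header element, hence the "+ 1" above (this is a descriptive note on
 * how the value is used when sizing the SRQ chain below).
 */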
58 #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
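/* DB_ADDR_SHIFT() turns a DQ PWM doorbell offset constant into the doorbell
 * offset that is added to the doorbell BAR base (db_addr) elsewhere in this
 * file.
 */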
60 static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
63 size_t min_len = min_t(size_t, len, udata->outlen);
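/* Clamping to udata->outlen keeps the copy within the response buffer the
 * user library actually supplied, so an older library with a shorter
 * response structure still gets a successful (truncated) copy.
 */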
65 return ib_copy_to_udata(udata, src, min_len);
68 int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
70 if (index > QEDR_ROCE_PKEY_TABLE_LEN)
73 *pkey = QEDR_ROCE_PKEY_DEFAULT;
77 int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
78 int index, union ib_gid *sgid)
80 struct qedr_dev *dev = get_qedr_dev(ibdev);
82 memset(sgid->raw, 0, sizeof(sgid->raw));
83 ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
85 DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
86 sgid->global.interface_id, sgid->global.subnet_prefix);
91 int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
93 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
94 struct qedr_device_attr *qattr = &dev->attr;
95 struct qedr_srq *srq = get_qedr_srq(ibsrq);
97 srq_attr->srq_limit = srq->srq_limit;
98 srq_attr->max_wr = qattr->max_srq_wr;
99 srq_attr->max_sge = qattr->max_sge;
104 int qedr_query_device(struct ib_device *ibdev,
105 struct ib_device_attr *attr, struct ib_udata *udata)
107 struct qedr_dev *dev = get_qedr_dev(ibdev);
108 struct qedr_device_attr *qattr = &dev->attr;
110 if (!dev->rdma_ctx) {
112 "qedr_query_device called with invalid params rdma_ctx=%p\n",
117 memset(attr, 0, sizeof(*attr));
119 attr->fw_ver = qattr->fw_ver;
120 attr->sys_image_guid = qattr->sys_image_guid;
121 attr->max_mr_size = qattr->max_mr_size;
122 attr->page_size_cap = qattr->page_size_caps;
123 attr->vendor_id = qattr->vendor_id;
124 attr->vendor_part_id = qattr->vendor_part_id;
125 attr->hw_ver = qattr->hw_ver;
126 attr->max_qp = qattr->max_qp;
127 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
128 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
129 IB_DEVICE_RC_RNR_NAK_GEN |
130 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
132 attr->max_send_sge = qattr->max_sge;
133 attr->max_recv_sge = qattr->max_sge;
134 attr->max_sge_rd = qattr->max_sge;
135 attr->max_cq = qattr->max_cq;
136 attr->max_cqe = qattr->max_cqe;
137 attr->max_mr = qattr->max_mr;
138 attr->max_mw = qattr->max_mw;
139 attr->max_pd = qattr->max_pd;
140 attr->atomic_cap = dev->atomic_cap;
141 attr->max_fmr = qattr->max_fmr;
142 attr->max_map_per_fmr = 16;
143 attr->max_qp_init_rd_atom =
144 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
145 attr->max_qp_rd_atom =
146 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
147 attr->max_qp_init_rd_atom);
149 attr->max_srq = qattr->max_srq;
150 attr->max_srq_sge = qattr->max_srq_sge;
151 attr->max_srq_wr = qattr->max_srq_wr;
153 attr->local_ca_ack_delay = qattr->dev_ack_delay;
154 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
155 attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
156 attr->max_ah = qattr->max_ah;
161 #define QEDR_SPEED_SDR (1)
162 #define QEDR_SPEED_DDR (2)
163 #define QEDR_SPEED_QDR (4)
164 #define QEDR_SPEED_FDR10 (8)
165 #define QEDR_SPEED_FDR (16)
166 #define QEDR_SPEED_EDR (32)
168 static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
173 *ib_speed = QEDR_SPEED_SDR;
174 *ib_width = IB_WIDTH_1X;
177 *ib_speed = QEDR_SPEED_QDR;
178 *ib_width = IB_WIDTH_1X;
182 *ib_speed = QEDR_SPEED_DDR;
183 *ib_width = IB_WIDTH_4X;
187 *ib_speed = QEDR_SPEED_EDR;
188 *ib_width = IB_WIDTH_1X;
192 *ib_speed = QEDR_SPEED_QDR;
193 *ib_width = IB_WIDTH_4X;
197 *ib_speed = QEDR_SPEED_QDR;
198 *ib_width = IB_WIDTH_4X;
202 *ib_speed = QEDR_SPEED_EDR;
203 *ib_width = IB_WIDTH_4X;
208 *ib_speed = QEDR_SPEED_SDR;
209 *ib_width = IB_WIDTH_1X;
213 int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
215 struct qedr_dev *dev;
216 struct qed_rdma_port *rdma_port;
218 dev = get_qedr_dev(ibdev);
220 DP_ERR(dev, "invalid_port=0x%x\n", port);
224 if (!dev->rdma_ctx) {
225 DP_ERR(dev, "rdma_ctx is NULL\n");
229 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
231	/* *attr is zeroed by the caller; avoid zeroing it here */
232 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
233 attr->state = IB_PORT_ACTIVE;
234 attr->phys_state = 5;
236 attr->state = IB_PORT_DOWN;
237 attr->phys_state = 3;
239 attr->max_mtu = IB_MTU_4096;
240 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
245 attr->ip_gids = true;
246 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
247 attr->gid_tbl_len = 1;
248 attr->pkey_tbl_len = 1;
250 attr->gid_tbl_len = QEDR_MAX_SGID;
251 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
253 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
254 attr->qkey_viol_cntr = 0;
255 get_link_speed_and_width(rdma_port->link_speed,
256 &attr->active_speed, &attr->active_width);
257 attr->max_msg_sz = rdma_port->max_msg_size;
258 attr->max_vl_num = 4;
263 int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
264 struct ib_port_modify *props)
266 struct qedr_dev *dev;
268 dev = get_qedr_dev(ibdev);
270 DP_ERR(dev, "invalid_port=0x%x\n", port);
277 static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
282 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
286 mm->key.phy_addr = phy_addr;
287 /* This function might be called with a length which is not a multiple
288 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
289 * forces this granularity by increasing the requested size if needed.
290 * When qedr_mmap is called, it will search the list with the updated
291 * length as a key. To prevent search failures, the length is rounded up
292 * in advance to PAGE_SIZE.
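 * For example, assuming 4 KiB pages, a request to map a 100-byte doorbell
 * region is stored here with key.len = 4096, which matches the page-granular
 * vma length that qedr_mmap() will later look up.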
294 mm->key.len = roundup(len, PAGE_SIZE);
295 INIT_LIST_HEAD(&mm->entry);
297 mutex_lock(&uctx->mm_list_lock);
298 list_add(&mm->entry, &uctx->mm_head);
299 mutex_unlock(&uctx->mm_list_lock);
301 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
302 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
303 (unsigned long long)mm->key.phy_addr,
304 (unsigned long)mm->key.len, uctx);
309 static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
315 mutex_lock(&uctx->mm_list_lock);
316 list_for_each_entry(mm, &uctx->mm_head, entry) {
317 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
323 mutex_unlock(&uctx->mm_list_lock);
324 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
325 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
326 mm->key.phy_addr, mm->key.len, uctx, found);
331 struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
332 struct ib_udata *udata)
335 struct qedr_ucontext *ctx;
336 struct qedr_alloc_ucontext_resp uresp;
337 struct qedr_dev *dev = get_qedr_dev(ibdev);
338 struct qed_rdma_add_user_out_params oparams;
341 return ERR_PTR(-EFAULT);
343 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
345 return ERR_PTR(-ENOMEM);
347 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
350 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
355 ctx->dpi = oparams.dpi;
356 ctx->dpi_addr = oparams.dpi_addr;
357 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
358 ctx->dpi_size = oparams.dpi_size;
359 INIT_LIST_HEAD(&ctx->mm_head);
360 mutex_init(&ctx->mm_list_lock);
362 memset(&uresp, 0, sizeof(uresp));
364 uresp.dpm_enabled = dev->user_dpm_enabled;
365 uresp.wids_enabled = 1;
366 uresp.wid_count = oparams.wid_count;
367 uresp.db_pa = ctx->dpi_phys_addr;
368 uresp.db_size = ctx->dpi_size;
369 uresp.max_send_wr = dev->attr.max_sqe;
370 uresp.max_recv_wr = dev->attr.max_rqe;
371 uresp.max_srq_wr = dev->attr.max_srq_wr;
372 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
373 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
374 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
375 uresp.max_cqes = QEDR_MAX_CQES;
377 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
383 rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
387 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
389 return &ctx->ibucontext;
396 int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
398 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
399 struct qedr_mm *mm, *tmp;
402 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
404 uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
406 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
407 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
408 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
409 mm->key.phy_addr, mm->key.len, uctx);
410 list_del(&mm->entry);
418 int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
420 struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
421 struct qedr_dev *dev = get_qedr_dev(context->device);
422 unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT;
423 unsigned long len = (vma->vm_end - vma->vm_start);
424 unsigned long dpi_start;
426 dpi_start = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
428 DP_DEBUG(dev, QEDR_MSG_INIT,
429 "mmap invoked with vm_start=0x%pK, vm_end=0x%pK,vm_pgoff=0x%pK; dpi_start=0x%pK dpi_size=0x%x\n",
430 (void *)vma->vm_start, (void *)vma->vm_end,
431 (void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size);
433 if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
435 "failed mmap, addresses must be page aligned: start=0x%pK, end=0x%pK\n",
436 (void *)vma->vm_start, (void *)vma->vm_end);
440 if (!qedr_search_mmap(ucontext, phys_addr, len)) {
441 DP_ERR(dev, "failed mmap, vm_pgoff=0x%lx is not authorized\n",
446 if (phys_addr < dpi_start ||
447 ((phys_addr + len) > (dpi_start + ucontext->dpi_size))) {
449 "failed mmap, pages are outside of dpi; page address=0x%pK, dpi_start=0x%pK, dpi_size=0x%x\n",
450 (void *)phys_addr, (void *)dpi_start,
455 if (vma->vm_flags & VM_READ) {
456 DP_ERR(dev, "failed mmap, cannot map doorbell bar for read\n");
460 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
461 return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
465 struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
466 struct ib_ucontext *context, struct ib_udata *udata)
468 struct qedr_dev *dev = get_qedr_dev(ibdev);
473 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
474 (udata && context) ? "User Lib" : "Kernel");
476 if (!dev->rdma_ctx) {
477 DP_ERR(dev, "invalid RDMA context\n");
478 return ERR_PTR(-EINVAL);
481 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
483 return ERR_PTR(-ENOMEM);
485 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
491 if (udata && context) {
492 struct qedr_alloc_pd_uresp uresp = {
496 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
498 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
499 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
503 pd->uctx = get_qedr_ucontext(context);
514 int qedr_dealloc_pd(struct ib_pd *ibpd)
516 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
517 struct qedr_pd *pd = get_qedr_pd(ibpd);
520 pr_err("Invalid PD received in dealloc_pd\n");
524 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
525 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
532 static void qedr_free_pbl(struct qedr_dev *dev,
533 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
535 struct pci_dev *pdev = dev->pdev;
538 for (i = 0; i < pbl_info->num_pbls; i++) {
541 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
542 pbl[i].va, pbl[i].pa);
548 #define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
549 #define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
551 #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
552 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
553 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
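/* With 8-byte PBEs this gives 512 PBEs per minimum-sized (4 KiB) PBL page and
 * 8192 per maximum-sized (64 KiB) page, so a two-layer PBL can describe up to
 * 8192 * 8192 pages.
 */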
555 static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
556 struct qedr_pbl_info *pbl_info,
559 struct pci_dev *pdev = dev->pdev;
560 struct qedr_pbl *pbl_table;
561 dma_addr_t *pbl_main_tbl;
566 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
568 return ERR_PTR(-ENOMEM);
570 for (i = 0; i < pbl_info->num_pbls; i++) {
571 va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
576 pbl_table[i].va = va;
577 pbl_table[i].pa = pa;
580	/* Two-layer PBLs: if we have more than one pbl we need to initialize
581 * the first one with physical pointers to all of the rest
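	 * (e.g. with num_pbls == 3, pbl_table[0] ends up holding the DMA
	 * addresses of pbl_table[1] and pbl_table[2]).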
583 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
584 for (i = 0; i < pbl_info->num_pbls - 1; i++)
585 pbl_main_tbl[i] = pbl_table[i + 1].pa;
590 for (i--; i >= 0; i--)
591 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
592 pbl_table[i].va, pbl_table[i].pa);
594 qedr_free_pbl(dev, pbl_info, pbl_table);
596 return ERR_PTR(-ENOMEM);
599 static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
600 struct qedr_pbl_info *pbl_info,
601 u32 num_pbes, int two_layer_capable)
607 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
608 if (num_pbes > MAX_PBES_TWO_LAYER) {
609 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
614 /* calculate required pbl page size */
615 pbl_size = MIN_FW_PBL_PAGE_SIZE;
616 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
617 NUM_PBES_ON_PAGE(pbl_size);
619 while (pbl_capacity < num_pbes) {
621 pbl_capacity = pbl_size / sizeof(u64);
622 pbl_capacity = pbl_capacity * pbl_capacity;
625 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
626		num_pbls++;	/* One for layer 0 (points to the pbls) */
627 pbl_info->two_layered = true;
629 /* One layered PBL */
631 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
632 roundup_pow_of_two((num_pbes * sizeof(u64))));
633 pbl_info->two_layered = false;
636 pbl_info->num_pbls = num_pbls;
637 pbl_info->pbl_size = pbl_size;
638 pbl_info->num_pbes = num_pbes;
640 DP_DEBUG(dev, QEDR_MSG_MR,
641 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
642 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
647 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
648 struct qedr_pbl *pbl,
649 struct qedr_pbl_info *pbl_info, u32 pg_shift)
651 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
652 u32 fw_pg_cnt, fw_pg_per_umem_pg;
653 struct qedr_pbl *pbl_tbl;
654 struct scatterlist *sg;
659 if (!pbl_info->num_pbes)
662	/* If we have a two-layered pbl, the first pbl points to the rest
663	 * of the pbls and the first entry lies in the second pbl of the table
665 if (pbl_info->two_layered)
670 pbe = (struct regpair *)pbl_tbl->va;
672 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
678 shift = umem->page_shift;
680 fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
682 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
683 pages = sg_dma_len(sg) >> shift;
684 pg_addr = sg_dma_address(sg);
685 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
686 for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
687 pbe->lo = cpu_to_le32(pg_addr);
688 pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
690 pg_addr += BIT(pg_shift);
695 if (total_num_pbes == pbl_info->num_pbes)
698 /* If the given pbl is full storing the pbes,
702 (pbl_info->pbl_size / sizeof(u64))) {
704 pbe = (struct regpair *)pbl_tbl->va;
714 static int qedr_copy_cq_uresp(struct qedr_dev *dev,
715 struct qedr_cq *cq, struct ib_udata *udata)
717 struct qedr_create_cq_uresp uresp;
720 memset(&uresp, 0, sizeof(uresp));
722 uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
723 uresp.icid = cq->icid;
725 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
727 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
732 static void consume_cqe(struct qedr_cq *cq)
734 if (cq->latest_cqe == cq->toggle_cqe)
735 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
737 cq->latest_cqe = qed_chain_consume(&cq->pbl);
740 static inline int qedr_align_cq_entries(int entries)
742 u64 size, aligned_size;
744 /* We allocate an extra entry that we don't report to the FW. */
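	/* For example, assuming 32-byte CQEs and 4 KiB pages, a request for
	 * 100 entries becomes (100 + 1) * 32 = 3232 bytes, aligned up to 4096,
	 * i.e. 128 usable CQE slots.
	 */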
745 size = (entries + 1) * QEDR_CQE_SIZE;
746 aligned_size = ALIGN(size, PAGE_SIZE);
748 return aligned_size / QEDR_CQE_SIZE;
751 static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
752 struct qedr_dev *dev,
753 struct qedr_userq *q,
754 u64 buf_addr, size_t buf_len,
755 int access, int dmasync,
761 q->buf_addr = buf_addr;
762 q->buf_len = buf_len;
763 q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
764 if (IS_ERR(q->umem)) {
765 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
767 return PTR_ERR(q->umem);
770 fw_pages = ib_umem_page_count(q->umem) <<
771 (q->umem->page_shift - FW_PAGE_SHIFT);
773 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
777 if (alloc_and_init) {
778 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
779 if (IS_ERR(q->pbl_tbl)) {
780 rc = PTR_ERR(q->pbl_tbl);
783 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
786 q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
796 ib_umem_release(q->umem);
802 static inline void qedr_init_cq_params(struct qedr_cq *cq,
803 struct qedr_ucontext *ctx,
804 struct qedr_dev *dev, int vector,
805 int chain_entries, int page_cnt,
807 struct qed_rdma_create_cq_in_params
810 memset(params, 0, sizeof(*params));
811 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
812 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
813 params->cnq_id = vector;
814 params->cq_size = chain_entries - 1;
815 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
816 params->pbl_num_pages = page_cnt;
817 params->pbl_ptr = pbl_ptr;
818 params->pbl_two_level = 0;
821 static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
823 cq->db.data.agg_flags = flags;
824 cq->db.data.value = cpu_to_le32(cons);
825 writeq(cq->db.raw, cq->db_addr);
827 /* Make sure write would stick */
831 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
833 struct qedr_cq *cq = get_qedr_cq(ibcq);
834 unsigned long sflags;
835 struct qedr_dev *dev;
837 dev = get_qedr_dev(ibcq->device);
841 "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
847 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
850 spin_lock_irqsave(&cq->cq_lock, sflags);
854 if (flags & IB_CQ_SOLICITED)
855 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
857 if (flags & IB_CQ_NEXT_COMP)
858 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
860 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
862 spin_unlock_irqrestore(&cq->cq_lock, sflags);
867 struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
868 const struct ib_cq_init_attr *attr,
869 struct ib_ucontext *ib_ctx, struct ib_udata *udata)
871 struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
872 struct qed_rdma_destroy_cq_out_params destroy_oparams;
873 struct qed_rdma_destroy_cq_in_params destroy_iparams;
874 struct qedr_dev *dev = get_qedr_dev(ibdev);
875 struct qed_rdma_create_cq_in_params params;
876 struct qedr_create_cq_ureq ureq;
877 int vector = attr->comp_vector;
878 int entries = attr->cqe;
886 DP_DEBUG(dev, QEDR_MSG_INIT,
887 "create_cq: called from %s. entries=%d, vector=%d\n",
888 udata ? "User Lib" : "Kernel", entries, vector);
890 if (entries > QEDR_MAX_CQES) {
892 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
893 entries, QEDR_MAX_CQES);
894 return ERR_PTR(-EINVAL);
897 chain_entries = qedr_align_cq_entries(entries);
898 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
900 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
902 return ERR_PTR(-ENOMEM);
905 memset(&ureq, 0, sizeof(ureq));
906 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
908 "create cq: problem copying data from user space\n");
914 "create cq: cannot create a cq with 0 entries\n");
918 cq->cq_type = QEDR_CQ_TYPE_USER;
920 rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
921 ureq.len, IB_ACCESS_LOCAL_WRITE,
926 pbl_ptr = cq->q.pbl_tbl->pa;
927 page_cnt = cq->q.pbl_info.num_pbes;
929 cq->ibcq.cqe = chain_entries;
931 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
933 rc = dev->ops->common->chain_alloc(dev->cdev,
934 QED_CHAIN_USE_TO_CONSUME,
936 QED_CHAIN_CNT_TYPE_U32,
938 sizeof(union rdma_cqe),
943 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
944 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
945 cq->ibcq.cqe = cq->pbl.capacity;
948 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
951	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
956 cq->sig = QEDR_CQ_MAGIC_NUMBER;
957 spin_lock_init(&cq->cq_lock);
960 rc = qedr_copy_cq_uresp(dev, cq, udata);
964 /* Generate doorbell address. */
965 cq->db_addr = dev->db_addr +
966 DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
967 cq->db.data.icid = cq->icid;
968 cq->db.data.params = DB_AGG_CMD_SET <<
969 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
971	/* point to the very last element; once we pass it we toggle */
972 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
973 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
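	/* pbl_toggle holds the toggle-bit value the driver expects to see in
	 * valid CQEs; consume_cqe() flips it each time the chain wraps past
	 * toggle_cqe, letting the poller tell fresh CQEs from stale ones.
	 */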
974 cq->latest_cqe = NULL;
976 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
979 DP_DEBUG(dev, QEDR_MSG_CQ,
980 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
981 cq->icid, cq, params.cq_size);
986 destroy_iparams.icid = cq->icid;
987 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
991 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
993 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
996 ib_umem_release(cq->q.umem);
999 return ERR_PTR(-EINVAL);
1002 int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
1004 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1005 struct qedr_cq *cq = get_qedr_cq(ibcq);
1007 DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
1012 #define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
1013 #define QEDR_DESTROY_CQ_ITER_DURATION (10)
1015 int qedr_destroy_cq(struct ib_cq *ibcq)
1017 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1018 struct qed_rdma_destroy_cq_out_params oparams;
1019 struct qed_rdma_destroy_cq_in_params iparams;
1020 struct qedr_cq *cq = get_qedr_cq(ibcq);
1024 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
1028	/* GSI CQs are handled by the driver, so they don't exist in the FW */
1029 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
1032 iparams.icid = cq->icid;
1033 rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1037 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1039 if (ibcq->uobject && ibcq->uobject->context) {
1040 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1041 ib_umem_release(cq->q.umem);
1044 /* We don't want the IRQ handler to handle a non-existing CQ so we
1045 * wait until all CNQ interrupts, if any, are received. This will always
1046 * happen and will always happen very fast. If not, then a serious error
1047	 * has occurred. That is why we can use a long delay.
1048	 * We spin for a short time so we don't lose time on context switching
1049 * in case all the completions are handled in that span. Otherwise
1050 * we sleep for a while and check again. Since the CNQ may be
1051 * associated with (only) the current CPU we use msleep to allow the
1052 * current CPU to be freed.
1053 * The CNQ notification is increased in qedr_irq_handler().
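	 * With the constants above this is a bounded wait: up to
	 * 10 * 10 usec of spinning followed by up to 10 * 10 msec of sleeping.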
1055 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1056 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1057 udelay(QEDR_DESTROY_CQ_ITER_DURATION);
1061 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1062 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1063 msleep(QEDR_DESTROY_CQ_ITER_DURATION);
1067 if (oparams.num_cq_notif != cq->cnq_notif)
1070 /* Note that we don't need to have explicit code to wait for the
1071 * completion of the event handler because it is invoked from the EQ.
1072 * Since the destroy CQ ramrod has also been received on the EQ we can
1073	 * be certain that there's no event handler in progress.
1084 "CQ %p (icid=%d) not freed, expecting %d ints but got %d ints\n",
1085 cq, cq->icid, oparams.num_cq_notif, cq->cnq_notif);
1090 static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1091 struct ib_qp_attr *attr,
1093 struct qed_rdma_modify_qp_in_params
1096 const struct ib_gid_attr *gid_attr;
1097 enum rdma_network_type nw_type;
1098 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
1102 gid_attr = grh->sgid_attr;
1103 qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr->ndev);
1105 nw_type = rdma_gid_attr_network_type(gid_attr);
1107 case RDMA_NETWORK_IPV6:
1108 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1109 sizeof(qp_params->sgid));
1110 memcpy(&qp_params->dgid.bytes[0],
1112 sizeof(qp_params->dgid));
1113 qp_params->roce_mode = ROCE_V2_IPV6;
1114 SET_FIELD(qp_params->modify_flags,
1115 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1117 case RDMA_NETWORK_IB:
1118 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1119 sizeof(qp_params->sgid));
1120 memcpy(&qp_params->dgid.bytes[0],
1122 sizeof(qp_params->dgid));
1123 qp_params->roce_mode = ROCE_V1;
1125 case RDMA_NETWORK_IPV4:
1126 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1127 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1128 ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
1129 qp_params->sgid.ipv4_addr = ipv4_addr;
1131 qedr_get_ipv4_from_gid(grh->dgid.raw);
1132 qp_params->dgid.ipv4_addr = ipv4_addr;
1133 SET_FIELD(qp_params->modify_flags,
1134 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1135 qp_params->roce_mode = ROCE_V2_IPV4;
1139 for (i = 0; i < 4; i++) {
1140 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1141 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1144 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1145 qp_params->vlan_id = 0;
1150 static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1151 struct ib_qp_init_attr *attrs)
1153 struct qedr_device_attr *qattr = &dev->attr;
1155 /* QP0... attrs->qp_type == IB_QPT_GSI */
1156 if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1157 DP_DEBUG(dev, QEDR_MSG_QP,
1158 "create qp: unsupported qp type=0x%x requested\n",
1163 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1165 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1166 attrs->cap.max_send_wr, qattr->max_sqe);
1170 if (attrs->cap.max_inline_data > qattr->max_inline) {
1172 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1173 attrs->cap.max_inline_data, qattr->max_inline);
1177 if (attrs->cap.max_send_sge > qattr->max_sge) {
1179 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1180 attrs->cap.max_send_sge, qattr->max_sge);
1184 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1186 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1187 attrs->cap.max_recv_sge, qattr->max_sge);
1191 /* Unprivileged user space cannot create special QP */
1192 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1194 "create qp: userspace can't create special QPs of type=0x%x\n",
1202 static int qedr_copy_srq_uresp(struct qedr_dev *dev,
1203 struct qedr_srq *srq, struct ib_udata *udata)
1205 struct qedr_create_srq_uresp uresp = {};
1208 uresp.srq_id = srq->srq_id;
1210 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1212 DP_ERR(dev, "create srq: problem copying data to user space\n");
1217 static void qedr_copy_rq_uresp(struct qedr_dev *dev,
1218 struct qedr_create_qp_uresp *uresp,
1221 /* iWARP requires two doorbells per RQ. */
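	/* One doorbell rings the RQ producer index; the second writes the TCM
	 * flags doorbell that actually posts the RQ (see
	 * qedr_set_iwarp_db_info()).
	 */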
1222 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1223 uresp->rq_db_offset =
1224 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1225 uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1227 uresp->rq_db_offset =
1228 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1231 uresp->rq_icid = qp->icid;
1234 static void qedr_copy_sq_uresp(struct qedr_dev *dev,
1235 struct qedr_create_qp_uresp *uresp,
1238 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1240 /* iWARP uses the same cid for rq and sq */
1241 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1242 uresp->sq_icid = qp->icid;
1244 uresp->sq_icid = qp->icid + 1;
1247 static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1248 struct qedr_qp *qp, struct ib_udata *udata)
1250 struct qedr_create_qp_uresp uresp;
1253 memset(&uresp, 0, sizeof(uresp));
1254 qedr_copy_sq_uresp(dev, &uresp, qp);
1255 qedr_copy_rq_uresp(dev, &uresp, qp);
1257 uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1258 uresp.qp_id = qp->qp_id;
1260 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1263 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1269 static void qedr_set_common_qp_params(struct qedr_dev *dev,
1272 struct ib_qp_init_attr *attrs)
1274 spin_lock_init(&qp->q_lock);
1275 atomic_set(&qp->refcnt, 1);
1277 qp->qp_type = attrs->qp_type;
1278 qp->max_inline_data = attrs->cap.max_inline_data;
1279 qp->sq.max_sges = attrs->cap.max_send_sge;
1280 qp->state = QED_ROCE_QP_STATE_RESET;
1281 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1282 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1286 qp->srq = get_qedr_srq(attrs->srq);
1288 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1289 qp->rq.max_sges = attrs->cap.max_recv_sge;
1290 DP_DEBUG(dev, QEDR_MSG_QP,
1291 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1292 qp->rq.max_sges, qp->rq_cq->icid);
1295 DP_DEBUG(dev, QEDR_MSG_QP,
1296 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1297 pd->pd_id, qp->qp_type, qp->max_inline_data,
1298 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1299 DP_DEBUG(dev, QEDR_MSG_QP,
1300 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1301 qp->sq.max_sges, qp->sq_cq->icid);
1304 static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1306 qp->sq.db = dev->db_addr +
1307 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1308 qp->sq.db_data.data.icid = qp->icid + 1;
1310 qp->rq.db = dev->db_addr +
1311 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1312 qp->rq.db_data.data.icid = qp->icid;
1316 static int qedr_check_srq_params(struct ib_pd *ibpd, struct qedr_dev *dev,
1317 struct ib_srq_init_attr *attrs,
1318 struct ib_udata *udata)
1320 struct qedr_device_attr *qattr = &dev->attr;
1322 if (attrs->attr.max_wr > qattr->max_srq_wr) {
1324 "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
1325 attrs->attr.max_wr, qattr->max_srq_wr);
1329 if (attrs->attr.max_sge > qattr->max_sge) {
1331 "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
1332 attrs->attr.max_sge, qattr->max_sge);
1339 static void qedr_free_srq_user_params(struct qedr_srq *srq)
1341 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1342 ib_umem_release(srq->usrq.umem);
1343 ib_umem_release(srq->prod_umem);
1346 static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
1348 struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1349 struct qedr_dev *dev = srq->dev;
1351 dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
1353 dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1354 hw_srq->virt_prod_pair_addr,
1355 hw_srq->phy_prod_pair_addr);
1358 static int qedr_init_srq_user_params(struct ib_ucontext *ib_ctx,
1359 struct qedr_srq *srq,
1360 struct qedr_create_srq_ureq *ureq,
1361 int access, int dmasync)
1363 struct scatterlist *sg;
1366 rc = qedr_init_user_queue(ib_ctx, srq->dev, &srq->usrq, ureq->srq_addr,
1367 ureq->srq_len, access, dmasync, 1);
1371 srq->prod_umem = ib_umem_get(ib_ctx, ureq->prod_pair_addr,
1372 sizeof(struct rdma_srq_producers),
1374 if (IS_ERR(srq->prod_umem)) {
1375 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1376 ib_umem_release(srq->usrq.umem);
1378 "create srq: failed ib_umem_get for producer, got %ld\n",
1379 PTR_ERR(srq->prod_umem));
1380 return PTR_ERR(srq->prod_umem);
1383 sg = srq->prod_umem->sg_head.sgl;
1384 srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
1389 static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
1390 struct qedr_dev *dev,
1391 struct ib_srq_init_attr *init_attr)
1393 struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1394 dma_addr_t phy_prod_pair_addr;
1399 va = dma_alloc_coherent(&dev->pdev->dev,
1400 sizeof(struct rdma_srq_producers),
1401 &phy_prod_pair_addr, GFP_KERNEL);
1404 "create srq: failed to allocate dma memory for producer\n");
1408 hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
1409 hw_srq->virt_prod_pair_addr = va;
1411 num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
1412 rc = dev->ops->common->chain_alloc(dev->cdev,
1413 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1415 QED_CHAIN_CNT_TYPE_U32,
1417 QEDR_SRQ_WQE_ELEM_SIZE,
1418 &hw_srq->pbl, NULL);
1422 hw_srq->num_elems = num_elems;
1427 dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1428 va, phy_prod_pair_addr);
1432 static int qedr_idr_add(struct qedr_dev *dev, struct qedr_idr *qidr,
1434 static void qedr_idr_remove(struct qedr_dev *dev,
1435 struct qedr_idr *qidr, u32 id);
1437 struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
1438 struct ib_srq_init_attr *init_attr,
1439 struct ib_udata *udata)
1441 struct qed_rdma_destroy_srq_in_params destroy_in_params;
1442 struct qed_rdma_create_srq_in_params in_params = {};
1443 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
1444 struct qed_rdma_create_srq_out_params out_params;
1445 struct qedr_pd *pd = get_qedr_pd(ibpd);
1446 struct qedr_create_srq_ureq ureq = {};
1447 u64 pbl_base_addr, phy_prod_pair_addr;
1448 struct ib_ucontext *ib_ctx = NULL;
1449 struct qedr_srq_hwq_info *hw_srq;
1450 struct qedr_ucontext *ctx = NULL;
1451 u32 page_cnt, page_size;
1452 struct qedr_srq *srq;
1455 DP_DEBUG(dev, QEDR_MSG_QP,
1456 "create SRQ called from %s (pd %p)\n",
1457 (udata) ? "User lib" : "kernel", pd);
1459 rc = qedr_check_srq_params(ibpd, dev, init_attr, udata);
1461 return ERR_PTR(-EINVAL);
1463 srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1465 return ERR_PTR(-ENOMEM);
1468 hw_srq = &srq->hw_srq;
1469 spin_lock_init(&srq->lock);
1471 hw_srq->max_wr = init_attr->attr.max_wr;
1472 hw_srq->max_sges = init_attr->attr.max_sge;
1474 if (udata && ibpd->uobject && ibpd->uobject->context) {
1475 ib_ctx = ibpd->uobject->context;
1476 ctx = get_qedr_ucontext(ib_ctx);
1478 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
1480 "create srq: problem copying data from user space\n");
1484 rc = qedr_init_srq_user_params(ib_ctx, srq, &ureq, 0, 0);
1488 page_cnt = srq->usrq.pbl_info.num_pbes;
1489 pbl_base_addr = srq->usrq.pbl_tbl->pa;
1490 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1491 page_size = BIT(srq->usrq.umem->page_shift);
1493 struct qed_chain *pbl;
1495 rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
1500 page_cnt = qed_chain_get_page_cnt(pbl);
1501 pbl_base_addr = qed_chain_get_pbl_phys(pbl);
1502 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1503 page_size = QED_CHAIN_PAGE_SIZE;
1506 in_params.pd_id = pd->pd_id;
1507 in_params.pbl_base_addr = pbl_base_addr;
1508 in_params.prod_pair_addr = phy_prod_pair_addr;
1509 in_params.num_pages = page_cnt;
1510 in_params.page_size = page_size;
1512 rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
1516 srq->srq_id = out_params.srq_id;
1519 rc = qedr_copy_srq_uresp(dev, srq, udata);
1524 rc = qedr_idr_add(dev, &dev->srqidr, srq, srq->srq_id);
1528 DP_DEBUG(dev, QEDR_MSG_SRQ,
1529 "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
1533 destroy_in_params.srq_id = srq->srq_id;
1535 dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
1538 qedr_free_srq_user_params(srq);
1540 qedr_free_srq_kernel_params(srq);
1544 return ERR_PTR(-EFAULT);
1547 int qedr_destroy_srq(struct ib_srq *ibsrq)
1549 struct qed_rdma_destroy_srq_in_params in_params = {};
1550 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1551 struct qedr_srq *srq = get_qedr_srq(ibsrq);
1553 qedr_idr_remove(dev, &dev->srqidr, srq->srq_id);
1554 in_params.srq_id = srq->srq_id;
1555 dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
1557 if (ibsrq->pd->uobject)
1558 qedr_free_srq_user_params(srq);
1560 qedr_free_srq_kernel_params(srq);
1562 DP_DEBUG(dev, QEDR_MSG_SRQ,
1563 "destroy srq: destroyed srq with srq_id=0x%0x\n",
1570 int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1571 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
1573 struct qed_rdma_modify_srq_in_params in_params = {};
1574 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1575 struct qedr_srq *srq = get_qedr_srq(ibsrq);
1578 if (attr_mask & IB_SRQ_MAX_WR) {
1580 "modify srq: invalid attribute mask=0x%x specified for %p\n",
1585 if (attr_mask & IB_SRQ_LIMIT) {
1586 if (attr->srq_limit >= srq->hw_srq.max_wr) {
1588 "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
1589 attr->srq_limit, srq->hw_srq.max_wr);
1593 in_params.srq_id = srq->srq_id;
1594 in_params.wqe_limit = attr->srq_limit;
1595 rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
1600 srq->srq_limit = attr->srq_limit;
1602 DP_DEBUG(dev, QEDR_MSG_SRQ,
1603 "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
1609 qedr_init_common_qp_in_params(struct qedr_dev *dev,
1612 struct ib_qp_init_attr *attrs,
1613 bool fmr_and_reserved_lkey,
1614 struct qed_rdma_create_qp_in_params *params)
1616 /* QP handle to be written in an async event */
1617 params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1618 params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
1620 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1621 params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1622 params->pd = pd->pd_id;
1623 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1624 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1625 params->stats_queue = 0;
1627 params->use_srq = false;
1630 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1633 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1634 params->srq_id = qp->srq->srq_id;
1635 params->use_srq = true;
1639 static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1641 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1650 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
1653 static int qedr_idr_add(struct qedr_dev *dev, struct qedr_idr *qidr,
1658 idr_preload(GFP_KERNEL);
1659 spin_lock_irq(&qidr->idr_lock);
1661 rc = idr_alloc(&qidr->idr, ptr, id, id + 1, GFP_ATOMIC);
1663 spin_unlock_irq(&qidr->idr_lock);
1666 return rc < 0 ? rc : 0;
1669 static void qedr_idr_remove(struct qedr_dev *dev, struct qedr_idr *qidr, u32 id)
1671 spin_lock_irq(&qidr->idr_lock);
1672 idr_remove(&qidr->idr, id);
1673 spin_unlock_irq(&qidr->idr_lock);
1677 qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
1679 struct qed_rdma_create_qp_out_params *out_params)
1681 qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1682 qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1684 qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1685 &qp->usq.pbl_info, FW_PAGE_SHIFT);
1687 qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1688 qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1691 qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1692 &qp->urq.pbl_info, FW_PAGE_SHIFT);
1695 static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
1698 ib_umem_release(qp->usq.umem);
1699 qp->usq.umem = NULL;
1702 ib_umem_release(qp->urq.umem);
1703 qp->urq.umem = NULL;
1706 static int qedr_create_user_qp(struct qedr_dev *dev,
1709 struct ib_udata *udata,
1710 struct ib_qp_init_attr *attrs)
1712 struct qed_rdma_create_qp_in_params in_params;
1713 struct qed_rdma_create_qp_out_params out_params;
1714 struct qedr_pd *pd = get_qedr_pd(ibpd);
1715 struct ib_ucontext *ib_ctx = NULL;
1716 struct qedr_create_qp_ureq ureq;
1717 int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
1720 ib_ctx = ibpd->uobject->context;
1722 memset(&ureq, 0, sizeof(ureq));
1723 rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
1725 DP_ERR(dev, "Problem copying data from user space\n");
1729 /* SQ - read access only (0), dma sync not required (0) */
1730 rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
1731 ureq.sq_len, 0, 0, alloc_and_init);
1736 /* RQ - read access only (0), dma sync not required (0) */
1737 rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
1738 ureq.rq_len, 0, 0, alloc_and_init);
1743 memset(&in_params, 0, sizeof(in_params));
1744 qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1745 in_params.qp_handle_lo = ureq.qp_handle_lo;
1746 in_params.qp_handle_hi = ureq.qp_handle_hi;
1747 in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1748 in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1750 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1751 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1754 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1755 &in_params, &out_params);
1762 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1763 qedr_iwarp_populate_user_qp(dev, qp, &out_params);
1765 qp->qp_id = out_params.qp_id;
1766 qp->icid = out_params.icid;
1768 rc = qedr_copy_qp_uresp(dev, qp, udata);
1772 qedr_qp_user_print(dev, qp);
1776 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1778 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1781 qedr_cleanup_user(dev, qp);
1785 static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1787 qp->sq.db = dev->db_addr +
1788 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1789 qp->sq.db_data.data.icid = qp->icid;
1791 qp->rq.db = dev->db_addr +
1792 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1793 qp->rq.db_data.data.icid = qp->icid;
1794 qp->rq.iwarp_db2 = dev->db_addr +
1795 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1796 qp->rq.iwarp_db2_data.data.icid = qp->icid;
1797 qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
1801 qedr_roce_create_kernel_qp(struct qedr_dev *dev,
1803 struct qed_rdma_create_qp_in_params *in_params,
1804 u32 n_sq_elems, u32 n_rq_elems)
1806 struct qed_rdma_create_qp_out_params out_params;
1809 rc = dev->ops->common->chain_alloc(dev->cdev,
1810 QED_CHAIN_USE_TO_PRODUCE,
1812 QED_CHAIN_CNT_TYPE_U32,
1814 QEDR_SQE_ELEMENT_SIZE,
1820 in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1821 in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
1823 rc = dev->ops->common->chain_alloc(dev->cdev,
1824 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1826 QED_CHAIN_CNT_TYPE_U32,
1828 QEDR_RQE_ELEMENT_SIZE,
1833 in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1834 in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
1836 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1837 in_params, &out_params);
1842 qp->qp_id = out_params.qp_id;
1843 qp->icid = out_params.icid;
1845 qedr_set_roce_db_info(dev, qp);
1850 qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
1852 struct qed_rdma_create_qp_in_params *in_params,
1853 u32 n_sq_elems, u32 n_rq_elems)
1855 struct qed_rdma_create_qp_out_params out_params;
1856 struct qed_chain_ext_pbl ext_pbl;
1859 in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
1860 QEDR_SQE_ELEMENT_SIZE,
1861 QED_CHAIN_MODE_PBL);
1862 in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
1863 QEDR_RQE_ELEMENT_SIZE,
1864 QED_CHAIN_MODE_PBL);
1866 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1867 in_params, &out_params);
1872 /* Now we allocate the chain */
1873 ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
1874 ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
1876 rc = dev->ops->common->chain_alloc(dev->cdev,
1877 QED_CHAIN_USE_TO_PRODUCE,
1879 QED_CHAIN_CNT_TYPE_U32,
1881 QEDR_SQE_ELEMENT_SIZE,
1882 &qp->sq.pbl, &ext_pbl);
1887 ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
1888 ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
1890 rc = dev->ops->common->chain_alloc(dev->cdev,
1891 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1893 QED_CHAIN_CNT_TYPE_U32,
1895 QEDR_RQE_ELEMENT_SIZE,
1896 &qp->rq.pbl, &ext_pbl);
1901 qp->qp_id = out_params.qp_id;
1902 qp->icid = out_params.icid;
1904 qedr_set_iwarp_db_info(dev, qp);
1908 dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1913 static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
1915 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1916 kfree(qp->wqe_wr_id);
1918 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1919 kfree(qp->rqe_wr_id);
1922 static int qedr_create_kernel_qp(struct qedr_dev *dev,
1925 struct ib_qp_init_attr *attrs)
1927 struct qed_rdma_create_qp_in_params in_params;
1928 struct qedr_pd *pd = get_qedr_pd(ibpd);
1934 memset(&in_params, 0, sizeof(in_params));
1936 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1937 * the ring. The ring should allow at least a single WR, even if the
1938 * user requested none, due to allocation issues.
1939 * We should add an extra WR since the prod and cons indices of
1940 * wqe_wr_id are managed in such a way that the WQ is considered full
1941 * when (prod+1)%max_wr==cons. We currently don't do that because we
1942	 * double the number of entries due to an iSER issue that pushes far more
1943 * WRs than indicated. If we decline its ib_post_send() then we get
1944 * error prints in the dmesg we'd like to avoid.
1946 qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
1949 qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
1951 if (!qp->wqe_wr_id) {
1952 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
1956 /* QP handle to be written in CQE */
1957 in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
1958 in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
1960 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
1961	 * the ring. The ring should allow at least a single WR, even if the
1962 * user requested none, due to allocation issues.
1964 qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
1966 /* Allocate driver internal RQ array */
1967 qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
1969 if (!qp->rqe_wr_id) {
1971 "create qp: failed RQ shadow memory allocation\n");
1972 kfree(qp->wqe_wr_id);
1976 qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
1978 n_sq_entries = attrs->cap.max_send_wr;
1979 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1980 n_sq_entries = max_t(u32, n_sq_entries, 1);
1981 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
1983 n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1985 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1986 rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
1987 n_sq_elems, n_rq_elems);
1989 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
1990 n_sq_elems, n_rq_elems);
1992 qedr_cleanup_kernel(dev, qp);
1997 struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1998 struct ib_qp_init_attr *attrs,
1999 struct ib_udata *udata)
2001 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2002 struct qedr_pd *pd = get_qedr_pd(ibpd);
2007 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
2008 udata ? "user library" : "kernel", pd);
2010 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
2014 DP_DEBUG(dev, QEDR_MSG_QP,
2015 "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
2016 udata ? "user library" : "kernel", attrs->event_handler, pd,
2017 get_qedr_cq(attrs->send_cq),
2018 get_qedr_cq(attrs->send_cq)->icid,
2019 get_qedr_cq(attrs->recv_cq),
2020 attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
2022 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2024 DP_ERR(dev, "create qp: failed allocating memory\n");
2025 return ERR_PTR(-ENOMEM);
2028 qedr_set_common_qp_params(dev, qp, pd, attrs);
2030 if (attrs->qp_type == IB_QPT_GSI) {
2031 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
2038 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
2040 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
2045 qp->ibqp.qp_num = qp->qp_id;
2047 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
2048 rc = qedr_idr_add(dev, &dev->qpidr, qp, qp->qp_id);
2058 return ERR_PTR(-EFAULT);
2061 static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
2064 case QED_ROCE_QP_STATE_RESET:
2065 return IB_QPS_RESET;
2066 case QED_ROCE_QP_STATE_INIT:
2068 case QED_ROCE_QP_STATE_RTR:
2070 case QED_ROCE_QP_STATE_RTS:
2072 case QED_ROCE_QP_STATE_SQD:
2074 case QED_ROCE_QP_STATE_ERR:
2076 case QED_ROCE_QP_STATE_SQE:
2082 static enum qed_roce_qp_state qedr_get_state_from_ibqp(
2083 enum ib_qp_state qp_state)
2087 return QED_ROCE_QP_STATE_RESET;
2089 return QED_ROCE_QP_STATE_INIT;
2091 return QED_ROCE_QP_STATE_RTR;
2093 return QED_ROCE_QP_STATE_RTS;
2095 return QED_ROCE_QP_STATE_SQD;
2097 return QED_ROCE_QP_STATE_ERR;
2099 return QED_ROCE_QP_STATE_ERR;
2103 static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
2105 qed_chain_reset(&qph->pbl);
2109 qph->db_data.data.value = cpu_to_le16(0);
2112 static int qedr_update_qp_state(struct qedr_dev *dev,
2114 enum qed_roce_qp_state cur_state,
2115 enum qed_roce_qp_state new_state)
2119 if (new_state == cur_state)
2122 switch (cur_state) {
2123 case QED_ROCE_QP_STATE_RESET:
2124 switch (new_state) {
2125 case QED_ROCE_QP_STATE_INIT:
2126 qp->prev_wqe_size = 0;
2127 qedr_reset_qp_hwq_info(&qp->sq);
2128 qedr_reset_qp_hwq_info(&qp->rq);
2135 case QED_ROCE_QP_STATE_INIT:
2136 switch (new_state) {
2137 case QED_ROCE_QP_STATE_RTR:
2138 /* Update doorbell (in case post_recv was
2139 * done before move to RTR)
2142 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2143 writel(qp->rq.db_data.raw, qp->rq.db);
2144 /* Make sure write takes effect */
2148 case QED_ROCE_QP_STATE_ERR:
2151 /* Invalid state change. */
2156 case QED_ROCE_QP_STATE_RTR:
2158 switch (new_state) {
2159 case QED_ROCE_QP_STATE_RTS:
2161 case QED_ROCE_QP_STATE_ERR:
2164 /* Invalid state change. */
2169 case QED_ROCE_QP_STATE_RTS:
2171 switch (new_state) {
2172 case QED_ROCE_QP_STATE_SQD:
2174 case QED_ROCE_QP_STATE_ERR:
2177 /* Invalid state change. */
2182 case QED_ROCE_QP_STATE_SQD:
2184 switch (new_state) {
2185 case QED_ROCE_QP_STATE_RTS:
2186 case QED_ROCE_QP_STATE_ERR:
2189 /* Invalid state change. */
2194 case QED_ROCE_QP_STATE_ERR:
2196 switch (new_state) {
2197 case QED_ROCE_QP_STATE_RESET:
2198 if ((qp->rq.prod != qp->rq.cons) ||
2199 (qp->sq.prod != qp->sq.cons)) {
2201 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
2202 qp->rq.prod, qp->rq.cons, qp->sq.prod,
2220 int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2221 int attr_mask, struct ib_udata *udata)
2223 struct qedr_qp *qp = get_qedr_qp(ibqp);
2224 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
2225 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
2226 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2227 enum ib_qp_state old_qp_state, new_qp_state;
2228 enum qed_roce_qp_state cur_state;
2231 DP_DEBUG(dev, QEDR_MSG_QP,
2232 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
2235 old_qp_state = qedr_get_ibqp_state(qp->state);
2236 if (attr_mask & IB_QP_STATE)
2237 new_qp_state = attr->qp_state;
2239 new_qp_state = old_qp_state;
2241 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2242 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
2243 ibqp->qp_type, attr_mask,
2244 IB_LINK_LAYER_ETHERNET)) {
2246 "modify qp: invalid attribute mask=0x%x specified for\n"
2247 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
2248 attr_mask, qp->qp_id, ibqp->qp_type,
2249 old_qp_state, new_qp_state);
2255 /* Translate the masks... */
2256 if (attr_mask & IB_QP_STATE) {
2257 SET_FIELD(qp_params.modify_flags,
2258 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
2259 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
2262 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
2263 qp_params.sqd_async = true;
2265 if (attr_mask & IB_QP_PKEY_INDEX) {
2266 SET_FIELD(qp_params.modify_flags,
2267 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
2268 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
2273 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
2276 if (attr_mask & IB_QP_QKEY)
2277 qp->qkey = attr->qkey;
2279 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2280 SET_FIELD(qp_params.modify_flags,
2281 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
2282 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2283 IB_ACCESS_REMOTE_READ;
2284 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2285 IB_ACCESS_REMOTE_WRITE;
2286 qp_params.incoming_atomic_en = attr->qp_access_flags &
2287 IB_ACCESS_REMOTE_ATOMIC;
2290 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
2291 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2294 if (attr_mask & IB_QP_PATH_MTU) {
2295 if (attr->path_mtu < IB_MTU_256 ||
2296 attr->path_mtu > IB_MTU_4096) {
2297 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
2301 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
2302 ib_mtu_enum_to_int(iboe_get_mtu
2308 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2309 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2312 SET_FIELD(qp_params.modify_flags,
2313 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
2315 qp_params.traffic_class_tos = grh->traffic_class;
2316 qp_params.flow_label = grh->flow_label;
2317 qp_params.hop_limit_ttl = grh->hop_limit;
2319 qp->sgid_idx = grh->sgid_index;
2321 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
2324 "modify qp: problems with GID index %d (rc=%d)\n",
2325 grh->sgid_index, rc);
2329 rc = qedr_get_dmac(dev, &attr->ah_attr,
2330 qp_params.remote_mac_addr);
2334 qp_params.use_local_mac = true;
2335 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2337 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2338 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2339 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2340 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2341 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2342 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2343 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2344 qp_params.remote_mac_addr);
2346 qp_params.mtu = qp->mtu;
2347 qp_params.lb_indication = false;
2350 if (!qp_params.mtu) {
2351 /* Stay with current MTU */
2353 qp_params.mtu = qp->mtu;
2356 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2359 if (attr_mask & IB_QP_TIMEOUT) {
2360 SET_FIELD(qp_params.modify_flags,
2361 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2363 /* The received timeout value is an exponent used like this:
2364 * "12.7.34 LOCAL ACK TIMEOUT
2365 * Value representing the transport (ACK) timeout for use by
2366 * the remote, expressed as: 4.096 * 2^timeout [usec]"
2367 * The FW expects timeout in msec so we need to divide the usec
2368 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
2369 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2370 * The value of zero means infinite so we use a 'max_t' to make
2371 * sure that sub 1 msec values will be configured as 1 msec.
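		 * For example, attr->timeout == 14 means 4.096 * 2^14 usec
		 * (~67 msec) on the wire, and the expression below configures
		 * 2^(14 - 8) = 64 msec.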
2374 qp_params.ack_timeout =
2375 1 << max_t(int, attr->timeout - 8, 0);
2377 qp_params.ack_timeout = 0;
2380 if (attr_mask & IB_QP_RETRY_CNT) {
2381 SET_FIELD(qp_params.modify_flags,
2382 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2383 qp_params.retry_cnt = attr->retry_cnt;
2386 if (attr_mask & IB_QP_RNR_RETRY) {
2387 SET_FIELD(qp_params.modify_flags,
2388 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2389 qp_params.rnr_retry_cnt = attr->rnr_retry;
2392 if (attr_mask & IB_QP_RQ_PSN) {
2393 SET_FIELD(qp_params.modify_flags,
2394 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2395 qp_params.rq_psn = attr->rq_psn;
2396 qp->rq_psn = attr->rq_psn;
2399 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2400 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2403 "unsupported max_rd_atomic=%d, supported=%d\n",
2404 attr->max_rd_atomic,
2405 dev->attr.max_qp_req_rd_atomic_resc);
2409 SET_FIELD(qp_params.modify_flags,
2410 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2411 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2414 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2415 SET_FIELD(qp_params.modify_flags,
2416 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2417 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2420 if (attr_mask & IB_QP_SQ_PSN) {
2421 SET_FIELD(qp_params.modify_flags,
2422 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2423 qp_params.sq_psn = attr->sq_psn;
2424 qp->sq_psn = attr->sq_psn;
2427 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2428 if (attr->max_dest_rd_atomic >
2429 dev->attr.max_qp_resp_rd_atomic_resc) {
2431 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2432 attr->max_dest_rd_atomic,
2433 dev->attr.max_qp_resp_rd_atomic_resc);
2439 SET_FIELD(qp_params.modify_flags,
2440 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2441 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2444 if (attr_mask & IB_QP_DEST_QPN) {
2445 SET_FIELD(qp_params.modify_flags,
2446 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2448 qp_params.dest_qp = attr->dest_qp_num;
2449 qp->dest_qp_num = attr->dest_qp_num;
2452 cur_state = qp->state;
2454 /* Update the QP state before the actual ramrod to prevent a race with
2455 * fast path. Modifying the QP state to error will cause the device to
2456 * flush the CQEs, and while polling, the flushed CQEs would be considered
2457 * a potential issue if the QP isn't in error state.
2459 if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
2460 !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
2461 qp->state = QED_ROCE_QP_STATE_ERR;
2463 if (qp->qp_type != IB_QPT_GSI)
2464 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2465 qp->qed_qp, &qp_params);
2467 if (attr_mask & IB_QP_STATE) {
2468 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
2469 rc = qedr_update_qp_state(dev, qp, cur_state,
2470 qp_params.new_state);
2471 qp->state = qp_params.new_state;
2478 static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2480 int ib_qp_acc_flags = 0;
2482 if (params->incoming_rdma_write_en)
2483 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2484 if (params->incoming_rdma_read_en)
2485 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2486 if (params->incoming_atomic_en)
2487 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2488 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2489 return ib_qp_acc_flags;
2492 int qedr_query_qp(struct ib_qp *ibqp,
2493 struct ib_qp_attr *qp_attr,
2494 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2496 struct qed_rdma_query_qp_out_params params;
2497 struct qedr_qp *qp = get_qedr_qp(ibqp);
2498 struct qedr_dev *dev = qp->dev;
2501 memset(&params, 0, sizeof(params));
2503 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2507 memset(qp_attr, 0, sizeof(*qp_attr));
2508 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2510 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2511 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2512 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2513 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2514 qp_attr->rq_psn = params.rq_psn;
2515 qp_attr->sq_psn = params.sq_psn;
2516 qp_attr->dest_qp_num = params.dest_qp;
2518 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2520 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2521 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2522 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2523 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2524 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
2525 qp_init_attr->cap = qp_attr->cap;
2527 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2528 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2529 params.flow_label, qp->sgid_idx,
2530 params.hop_limit_ttl, params.traffic_class_tos);
2531 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2532 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2533 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
2534 qp_attr->timeout = params.timeout;
2535 qp_attr->rnr_retry = params.rnr_retry;
2536 qp_attr->retry_cnt = params.retry_cnt;
2537 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2538 qp_attr->pkey_index = params.pkey_index;
2539 qp_attr->port_num = 1;
2540 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2541 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
2542 qp_attr->alt_pkey_index = 0;
2543 qp_attr->alt_port_num = 0;
2544 qp_attr->alt_timeout = 0;
2545 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2547 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2548 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2549 qp_attr->max_rd_atomic = params.max_rd_atomic;
2550 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2552 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2553 qp_attr->cap.max_inline_data);
2559 static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
2563 if (qp->qp_type != IB_QPT_GSI) {
2564 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2569 if (qp->ibqp.uobject && qp->ibqp.uobject->context)
2570 qedr_cleanup_user(dev, qp);
2572 qedr_cleanup_kernel(dev, qp);
2577 int qedr_destroy_qp(struct ib_qp *ibqp)
2579 struct qedr_qp *qp = get_qedr_qp(ibqp);
2580 struct qedr_dev *dev = qp->dev;
2581 struct ib_qp_attr attr;
2585 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2588 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2589 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2590 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2591 (qp->state != QED_ROCE_QP_STATE_INIT)) {
2593 attr.qp_state = IB_QPS_ERR;
2594 attr_mask |= IB_QP_STATE;
2596 /* Change the QP state to ERROR */
2597 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2600 /* Wait for the connect/accept to complete */
2604 while (qp->ep->during_connect) {
2605 DP_DEBUG(dev, QEDR_MSG_QP,
2606 "Still in during connect/accept\n");
2609 if (wait_count++ > 200) {
2611 "during connect timeout\n");
2618 if (qp->qp_type == IB_QPT_GSI)
2619 qedr_destroy_gsi_qp(dev);
2621 qedr_free_qp_resources(dev, qp);
2623 if (atomic_dec_and_test(&qp->refcnt) &&
2624 rdma_protocol_iwarp(&dev->ibdev, 1)) {
2625 qedr_idr_remove(dev, &dev->qpidr, qp->qp_id);
2631 struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
2632 struct ib_udata *udata)
2636 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2638 return ERR_PTR(-ENOMEM);
2640 rdma_copy_ah_attr(&ah->attr, attr);
2645 int qedr_destroy_ah(struct ib_ah *ibah)
2647 struct qedr_ah *ah = get_qedr_ah(ibah);
2649 rdma_destroy_ah_attr(&ah->attr);
2654 static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2656 struct qedr_pbl *pbl, *tmp;
2658 if (info->pbl_table)
2659 list_add_tail(&info->pbl_table->list_entry,
2660 &info->free_pbl_list);
2662 if (!list_empty(&info->inuse_pbl_list))
2663 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2665 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2666 list_del(&pbl->list_entry);
2667 qedr_free_pbl(dev, &info->pbl_info, pbl);
2671 static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2672 size_t page_list_len, bool two_layered)
2674 struct qedr_pbl *tmp;
2677 INIT_LIST_HEAD(&info->free_pbl_list);
2678 INIT_LIST_HEAD(&info->inuse_pbl_list);
2680 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2681 page_list_len, two_layered);
2685 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2686 if (IS_ERR(info->pbl_table)) {
2687 rc = PTR_ERR(info->pbl_table);
2691 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2692 &info->pbl_table->pa);
2694 /* In the usual case we use 2 PBLs, so we add one to the free
2695 * list and allocate another one
2697 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2699 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2703 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2705 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2709 free_mr_info(dev, info);
2714 struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2715 u64 usr_addr, int acc, struct ib_udata *udata)
2717 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2722 pd = get_qedr_pd(ibpd);
2723 DP_DEBUG(dev, QEDR_MSG_MR,
2724 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2725 pd->pd_id, start, len, usr_addr, acc);
2727 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2728 return ERR_PTR(-EINVAL);
2730 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2734 mr->type = QEDR_MR_USER;
2736 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2737 if (IS_ERR(mr->umem)) {
2742 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2746 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2747 &mr->info.pbl_info, mr->umem->page_shift);
2749 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2751 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2755 /* Index only, 18 bit long, lkey = itid << 8 | key */
2756 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2758 mr->hw_mr.pd = pd->pd_id;
2759 mr->hw_mr.local_read = 1;
2760 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2761 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2762 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2763 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2764 mr->hw_mr.mw_bind = false;
2765 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2766 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2767 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2768 mr->hw_mr.page_size_log = mr->umem->page_shift;
2769 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2770 mr->hw_mr.length = len;
2771 mr->hw_mr.vaddr = usr_addr;
2772 mr->hw_mr.zbva = false;
2773 mr->hw_mr.phy_mr = false;
2774 mr->hw_mr.dma_mr = false;
2776 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2778 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2782 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2783 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2784 mr->hw_mr.remote_atomic)
2785 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
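/* Illustrative example (hypothetical itid/key values): itid 0x00a1 with
 * key 0x3c yields lkey/rkey 0x0000a13c -- the TID occupies the upper bits
 * and the key the low byte, per the "lkey = itid << 8 | key" comment above.
 */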
2787 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2792 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2794 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2800 int qedr_dereg_mr(struct ib_mr *ib_mr)
2802 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2803 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2806 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2810 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2812 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2813 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2815 /* it could be user registered memory. */
2817 ib_umem_release(mr->umem);
2824 static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2825 int max_page_list_len)
2827 struct qedr_pd *pd = get_qedr_pd(ibpd);
2828 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2832 DP_DEBUG(dev, QEDR_MSG_MR,
2833 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2836 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2841 mr->type = QEDR_MR_FRMR;
2843 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2847 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2849 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2853 /* Index only, 18 bit long, lkey = itid << 8 | key */
2854 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2856 mr->hw_mr.pd = pd->pd_id;
2857 mr->hw_mr.local_read = 1;
2858 mr->hw_mr.local_write = 0;
2859 mr->hw_mr.remote_read = 0;
2860 mr->hw_mr.remote_write = 0;
2861 mr->hw_mr.remote_atomic = 0;
2862 mr->hw_mr.mw_bind = false;
2863 mr->hw_mr.pbl_ptr = 0;
2864 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2865 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2867 mr->hw_mr.length = 0;
2868 mr->hw_mr.vaddr = 0;
2869 mr->hw_mr.zbva = false;
2870 mr->hw_mr.phy_mr = true;
2871 mr->hw_mr.dma_mr = false;
2873 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2875 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2879 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2880 mr->ibmr.rkey = mr->ibmr.lkey;
2882 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2886 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2892 struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2893 enum ib_mr_type mr_type, u32 max_num_sg)
2897 if (mr_type != IB_MR_TYPE_MEM_REG)
2898 return ERR_PTR(-EINVAL);
2900 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2903 return ERR_PTR(-EINVAL);
2908 static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2910 struct qedr_mr *mr = get_qedr_mr(ibmr);
2911 struct qedr_pbl *pbl_table;
2912 struct regpair *pbe;
2915 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2916 DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
2920 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2923 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2924 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2925 pbe = (struct regpair *)pbl_table->va;
2926 pbe += mr->npages % pbes_in_page;
2927 pbe->lo = cpu_to_le32((u32)addr);
2928 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
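/* Illustrative indexing example (hypothetical sizes): with a 4 KiB PBL,
 * pbes_in_page = 4096 / 8 = 512, so page number 700 lands in
 * pbl_table[700 / 512] = pbl_table[1] at entry 700 % 512 = 188, and the
 * 64-bit DMA address is stored as two little-endian 32-bit halves.
 */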
2935 static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2937 int work = info->completed - info->completed_handled - 1;
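/* One way to read the "- 1" (our interpretation, not authoritative):
 * if completed = 5 and completed_handled = 2, then work = 2, i.e. the PBL
 * tied to the most recent completion stays on the inuse list in case it is
 * still referenced, and only older ones are recycled below.
 */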
2939 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2940 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2941 struct qedr_pbl *pbl;
2943 /* Free all the page lists that can be freed
2944 * (all the ones that were invalidated), under the assumption
2945 * that if an FMR completed successfully, any invalidate
2946 * operation issued before it has also completed
2948 pbl = list_first_entry(&info->inuse_pbl_list,
2949 struct qedr_pbl, list_entry);
2950 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
2951 info->completed_handled++;
2955 int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2956 int sg_nents, unsigned int *sg_offset)
2958 struct qedr_mr *mr = get_qedr_mr(ibmr);
2962 handle_completed_mrs(mr->dev, &mr->info);
2963 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2966 struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2968 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2969 struct qedr_pd *pd = get_qedr_pd(ibpd);
2973 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2975 return ERR_PTR(-ENOMEM);
2977 mr->type = QEDR_MR_DMA;
2979 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2981 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2985 /* index only, 18 bit long, lkey = itid << 8 | key */
2986 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2987 mr->hw_mr.pd = pd->pd_id;
2988 mr->hw_mr.local_read = 1;
2989 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2990 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2991 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2992 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2993 mr->hw_mr.dma_mr = true;
2995 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2997 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3001 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3002 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3003 mr->hw_mr.remote_atomic)
3004 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3006 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
3010 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3016 static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
3018 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
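/* Example (illustrative): with max_wr = 256, prod = 255 and cons = 0 the
 * queue is reported full -- one slot is always left empty so that
 * prod == cons can unambiguously mean "empty".
 */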
3021 static int sge_data_len(struct ib_sge *sg_list, int num_sge)
3025 for (i = 0; i < num_sge; i++)
3026 len += sg_list[i].length;
3031 static void swap_wqe_data64(u64 *p)
3035 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
3036 *p = cpu_to_be64(cpu_to_le64(*p));
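/* Note (our reading): on a little-endian host cpu_to_le64() is a no-op and
 * cpu_to_be64() swaps; on a big-endian host the roles flip. Either way the
 * composition is an unconditional 64-bit byte swap of each inline-data qword.
 */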
3039 static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
3040 struct qedr_qp *qp, u8 *wqe_size,
3041 const struct ib_send_wr *wr,
3042 const struct ib_send_wr **bad_wr,
3045 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
3046 char *seg_prt, *wqe;
3049 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
3050 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
3064 /* Copy data inline */
3065 for (i = 0; i < wr->num_sge; i++) {
3066 u32 len = wr->sg_list[i].length;
3067 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
3072 /* New segment required */
3074 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
3076 seg_siz = sizeof(struct rdma_sq_common_wqe);
3080 /* Calculate currently allowed length */
3081 cur = min_t(u32, len, seg_siz);
3082 memcpy(seg_prt, src, cur);
3084 /* Update segment variables */
3088 /* Update sge variables */
3092 /* Swap fully-completed segments */
3094 swap_wqe_data64((u64 *)wqe);
3098 /* swap the last, partially filled segment */
3100 swap_wqe_data64((u64 *)wqe);
3105 #define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
3107 DMA_REGPAIR_LE(sge->addr, vaddr); \
3108 (sge)->length = cpu_to_le32(vlength); \
3109 (sge)->flags = cpu_to_le32(vflags); \
3112 #define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
3114 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
3115 (hdr)->num_sges = num_sge; \
3118 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
3120 DMA_REGPAIR_LE(sge->addr, vaddr); \
3121 (sge)->length = cpu_to_le32(vlength); \
3122 (sge)->l_key = cpu_to_le32(vlkey); \
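/* Usage sketch (hypothetical values): SRQ_SGE_SET(srq_sge, dma_addr, 512, lkey)
 * stores the 64-bit DMA address as a little-endian register pair and the
 * length/lkey as little-endian 32-bit words, matching the device's SRQ WQE
 * layout; qedr_post_srq_recv() below uses it for every SGE it posts.
 */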
3125 static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
3126 const struct ib_send_wr *wr)
3131 for (i = 0; i < wr->num_sge; i++) {
3132 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
3134 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
3135 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
3136 sge->length = cpu_to_le32(wr->sg_list[i].length);
3137 data_size += wr->sg_list[i].length;
3141 *wqe_size += wr->num_sge;
3146 static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
3148 struct rdma_sq_rdma_wqe_1st *rwqe,
3149 struct rdma_sq_rdma_wqe_2nd *rwqe2,
3150 const struct ib_send_wr *wr,
3151 const struct ib_send_wr **bad_wr)
3153 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
3154 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
3156 if (wr->send_flags & IB_SEND_INLINE &&
3157 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
3158 wr->opcode == IB_WR_RDMA_WRITE)) {
3161 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
3162 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
3163 bad_wr, &rwqe->flags, flags);
3166 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
3169 static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
3171 struct rdma_sq_send_wqe_1st *swqe,
3172 struct rdma_sq_send_wqe_2st *swqe2,
3173 const struct ib_send_wr *wr,
3174 const struct ib_send_wr **bad_wr)
3176 memset(swqe2, 0, sizeof(*swqe2));
3177 if (wr->send_flags & IB_SEND_INLINE) {
3180 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
3181 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
3182 bad_wr, &swqe->flags, flags);
3185 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
3188 static int qedr_prepare_reg(struct qedr_qp *qp,
3189 struct rdma_sq_fmr_wqe_1st *fwqe1,
3190 const struct ib_reg_wr *wr)
3192 struct qedr_mr *mr = get_qedr_mr(wr->mr);
3193 struct rdma_sq_fmr_wqe_2nd *fwqe2;
3195 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
3196 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
3197 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
3198 fwqe1->l_key = wr->key;
3200 fwqe2->access_ctrl = 0;
3202 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
3203 !!(wr->access & IB_ACCESS_REMOTE_READ));
3204 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
3205 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
3206 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
3207 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
3208 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
3209 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
3210 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
3211 fwqe2->fmr_ctrl = 0;
3213 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
3214 ilog2(mr->ibmr.page_size) - 12);
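/* Illustrative: for a standard 4 KiB MR page size, ilog2(4096) - 12 = 0;
 * for hypothetical 2 MiB pages it would be 21 - 12 = 9, i.e. the field is
 * encoded relative to a 4 KiB base.
 */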
3216 fwqe2->length_hi = 0;
3217 fwqe2->length_lo = mr->ibmr.length;
3218 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
3219 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
3221 qp->wqe_wr_id[qp->sq.prod].mr = mr;
3226 static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
3229 case IB_WR_RDMA_WRITE:
3230 case IB_WR_RDMA_WRITE_WITH_IMM:
3231 return IB_WC_RDMA_WRITE;
3232 case IB_WR_SEND_WITH_IMM:
3234 case IB_WR_SEND_WITH_INV:
3236 case IB_WR_RDMA_READ:
3237 case IB_WR_RDMA_READ_WITH_INV:
3238 return IB_WC_RDMA_READ;
3239 case IB_WR_ATOMIC_CMP_AND_SWP:
3240 return IB_WC_COMP_SWAP;
3241 case IB_WR_ATOMIC_FETCH_AND_ADD:
3242 return IB_WC_FETCH_ADD;
3244 return IB_WC_REG_MR;
3245 case IB_WR_LOCAL_INV:
3246 return IB_WC_LOCAL_INV;
3252 static inline bool qedr_can_post_send(struct qedr_qp *qp,
3253 const struct ib_send_wr *wr)
3255 int wq_is_full, err_wr, pbl_is_full;
3256 struct qedr_dev *dev = qp->dev;
3258 /* prevent SQ overflow and/or processing of a bad WR */
3259 err_wr = wr->num_sge > qp->sq.max_sges;
3260 wq_is_full = qedr_wq_is_full(&qp->sq);
3261 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
3262 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
3263 if (wq_is_full || err_wr || pbl_is_full) {
3264 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
3266 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
3268 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
3271 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
3273 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
3275 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
3279 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
3281 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
3283 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
3290 static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3291 const struct ib_send_wr **bad_wr)
3293 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3294 struct qedr_qp *qp = get_qedr_qp(ibqp);
3295 struct rdma_sq_atomic_wqe_1st *awqe1;
3296 struct rdma_sq_atomic_wqe_2nd *awqe2;
3297 struct rdma_sq_atomic_wqe_3rd *awqe3;
3298 struct rdma_sq_send_wqe_2st *swqe2;
3299 struct rdma_sq_local_inv_wqe *iwqe;
3300 struct rdma_sq_rdma_wqe_2nd *rwqe2;
3301 struct rdma_sq_send_wqe_1st *swqe;
3302 struct rdma_sq_rdma_wqe_1st *rwqe;
3303 struct rdma_sq_fmr_wqe_1st *fwqe1;
3304 struct rdma_sq_common_wqe *wqe;
3309 if (!qedr_can_post_send(qp, wr)) {
3314 wqe = qed_chain_produce(&qp->sq.pbl);
3315 qp->wqe_wr_id[qp->sq.prod].signaled =
3316 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3319 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3320 !!(wr->send_flags & IB_SEND_SOLICITED));
3321 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3322 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3323 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3324 !!(wr->send_flags & IB_SEND_FENCE));
3325 wqe->prev_wqe_size = qp->prev_wqe_size;
3327 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
3329 switch (wr->opcode) {
3330 case IB_WR_SEND_WITH_IMM:
3331 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3336 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3337 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3339 swqe2 = qed_chain_produce(&qp->sq.pbl);
3341 swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
3342 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3344 swqe->length = cpu_to_le32(length);
3345 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3346 qp->prev_wqe_size = swqe->wqe_size;
3347 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3350 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3351 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3354 swqe2 = qed_chain_produce(&qp->sq.pbl);
3355 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3357 swqe->length = cpu_to_le32(length);
3358 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3359 qp->prev_wqe_size = swqe->wqe_size;
3360 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3362 case IB_WR_SEND_WITH_INV:
3363 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3364 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3365 swqe2 = qed_chain_produce(&qp->sq.pbl);
3367 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3368 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3370 swqe->length = cpu_to_le32(length);
3371 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3372 qp->prev_wqe_size = swqe->wqe_size;
3373 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3376 case IB_WR_RDMA_WRITE_WITH_IMM:
3377 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3382 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3383 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3386 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3387 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3388 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3390 rwqe->length = cpu_to_le32(length);
3391 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3392 qp->prev_wqe_size = rwqe->wqe_size;
3393 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3395 case IB_WR_RDMA_WRITE:
3396 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3397 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3400 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3401 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3403 rwqe->length = cpu_to_le32(length);
3404 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3405 qp->prev_wqe_size = rwqe->wqe_size;
3406 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3408 case IB_WR_RDMA_READ_WITH_INV:
3409 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
3410 /* fallthrough -- handled identically to RDMA READ */
3412 case IB_WR_RDMA_READ:
3413 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3414 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3417 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3418 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3420 rwqe->length = cpu_to_le32(length);
3421 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3422 qp->prev_wqe_size = rwqe->wqe_size;
3423 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3426 case IB_WR_ATOMIC_CMP_AND_SWP:
3427 case IB_WR_ATOMIC_FETCH_AND_ADD:
3428 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3429 awqe1->wqe_size = 4;
3431 awqe2 = qed_chain_produce(&qp->sq.pbl);
3432 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3433 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3435 awqe3 = qed_chain_produce(&qp->sq.pbl);
3437 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3438 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3439 DMA_REGPAIR_LE(awqe3->swap_data,
3440 atomic_wr(wr)->compare_add);
3442 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3443 DMA_REGPAIR_LE(awqe3->swap_data,
3444 atomic_wr(wr)->swap);
3445 DMA_REGPAIR_LE(awqe3->cmp_data,
3446 atomic_wr(wr)->compare_add);
3449 qedr_prepare_sq_sges(qp, NULL, wr);
3451 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3452 qp->prev_wqe_size = awqe1->wqe_size;
3455 case IB_WR_LOCAL_INV:
3456 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3459 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3460 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3461 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3462 qp->prev_wqe_size = iwqe->wqe_size;
3465 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3466 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3467 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3468 fwqe1->wqe_size = 2;
3470 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3472 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3477 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3478 qp->prev_wqe_size = fwqe1->wqe_size;
3481 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3490 /* Restore prod to its position before
3491 * this WR was processed
3493 value = le16_to_cpu(qp->sq.db_data.data.value);
3494 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3496 /* Restore prev_wqe_size */
3497 qp->prev_wqe_size = wqe->prev_wqe_size;
3499 DP_ERR(dev, "POST SEND FAILED\n");
3505 int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3506 const struct ib_send_wr **bad_wr)
3508 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3509 struct qedr_qp *qp = get_qedr_qp(ibqp);
3510 unsigned long flags;
3515 if (qp->qp_type == IB_QPT_GSI)
3516 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3518 spin_lock_irqsave(&qp->q_lock, flags);
3520 if (rdma_protocol_roce(&dev->ibdev, 1)) {
3521 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3522 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3523 (qp->state != QED_ROCE_QP_STATE_SQD)) {
3524 spin_unlock_irqrestore(&qp->q_lock, flags);
3526 DP_DEBUG(dev, QEDR_MSG_CQ,
3527 "QP in wrong state! QP icid=0x%x state %d\n",
3528 qp->icid, qp->state);
3534 rc = __qedr_post_send(ibqp, wr, bad_wr);
3538 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3540 qedr_inc_sw_prod(&qp->sq);
3542 qp->sq.db_data.data.value++;
3548 * If there was a failure in the first WR then it will be triggered in
3549 * vain. However, this is not harmful (as long as the producer value is
3550 * unchanged). For performance reasons we avoid checking for this
3551 * redundant doorbell.
3553 * qp->wqe_wr_id is accessed during qedr_poll_cq, as
3554 * soon as we give the doorbell, we could get a completion
3555 * for this wr, therefore we need to make sure that the
3556 * memory is updated before giving the doorbell.
3557 * During qedr_poll_cq, rmb is called before accessing the
3558 * cqe. This covers for the smp_rmb as well.
3561 writel(qp->sq.db_data.raw, qp->sq.db);
3563 /* Make sure write sticks */
3566 spin_unlock_irqrestore(&qp->q_lock, flags);
3571 static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
3575 /* Calculate the number of elements used from the producer
3576 * and consumer counts, and subtract it from the maximum
3577 * supported work requests to get the number of elements left.
3579 used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
3581 return hw_srq->max_wr - used;
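/* Example (illustrative): max_wr = 128, wr_prod_cnt = 10, wr_cons_cnt = 4
 * gives used = 6 and 122 elements left; assuming the counters are unsigned,
 * wrap-around still yields the correct difference.
 */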
3584 int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
3585 const struct ib_recv_wr **bad_wr)
3587 struct qedr_srq *srq = get_qedr_srq(ibsrq);
3588 struct qedr_srq_hwq_info *hw_srq;
3589 struct qedr_dev *dev = srq->dev;
3590 struct qed_chain *pbl;
3591 unsigned long flags;
3596 spin_lock_irqsave(&srq->lock, flags);
3598 hw_srq = &srq->hw_srq;
3599 pbl = &srq->hw_srq.pbl;
3601 struct rdma_srq_wqe_header *hdr;
3604 if (!qedr_srq_elem_left(hw_srq) ||
3605 wr->num_sge > srq->hw_srq.max_sges) {
3606 DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n",
3607 hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
3608 wr->num_sge, srq->hw_srq.max_sges);
3614 hdr = qed_chain_produce(pbl);
3615 num_sge = wr->num_sge;
3616 /* Set number of sge and work request id in header */
3617 SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
3619 srq->hw_srq.wr_prod_cnt++;
3623 DP_DEBUG(dev, QEDR_MSG_SRQ,
3624 "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
3625 wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
3627 for (i = 0; i < wr->num_sge; i++) {
3628 struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
3630 /* Set SGE length, lkey and address */
3631 SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
3632 wr->sg_list[i].length, wr->sg_list[i].lkey);
3634 DP_DEBUG(dev, QEDR_MSG_SRQ,
3635 "[%d]: len %d key %x addr %x:%x\n",
3636 i, srq_sge->length, srq_sge->l_key,
3637 srq_sge->addr.hi, srq_sge->addr.lo);
3641 /* Flush WQE and SGE information before
3642 * updating producer.
3646 /* The SRQ producer is 8 bytes: the SGE producer index lives in the
3647 * first 4 bytes and the WQE producer in the next 4 bytes.
3650 *srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod;
3651 offset = offsetof(struct rdma_srq_producers, wqe_prod);
3652 *((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
3655 /* Flush producer after updating it. */
3660 DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
3661 qed_chain_get_elem_left(pbl));
3662 spin_unlock_irqrestore(&srq->lock, flags);
3667 int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
3668 const struct ib_recv_wr **bad_wr)
3670 struct qedr_qp *qp = get_qedr_qp(ibqp);
3671 struct qedr_dev *dev = qp->dev;
3672 unsigned long flags;
3675 if (qp->qp_type == IB_QPT_GSI)
3676 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3678 spin_lock_irqsave(&qp->q_lock, flags);
3680 if (qp->state == QED_ROCE_QP_STATE_RESET) {
3681 spin_unlock_irqrestore(&qp->q_lock, flags);
3689 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3690 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3691 wr->num_sge > qp->rq.max_sges) {
3692 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3693 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3694 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3700 for (i = 0; i < wr->num_sge; i++) {
3702 struct rdma_rq_sge *rqe =
3703 qed_chain_produce(&qp->rq.pbl);
3705 /* First one must include the number
3706 * of SGE in the list
3709 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3712 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
3713 wr->sg_list[i].lkey);
3715 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3716 wr->sg_list[i].length, flags);
3719 /* Special case of no SGEs. FW requires between 1-4 SGEs;
3720 * in this case we need to post 1 SGE with length zero. This is
3721 * because an RDMA write with immediate consumes an RQ entry.
3725 struct rdma_rq_sge *rqe =
3726 qed_chain_produce(&qp->rq.pbl);
3728 /* First one must include the number
3729 * of SGE in the list
3731 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
3732 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3734 RQ_SGE_SET(rqe, 0, 0, flags);
3738 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3739 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3741 qedr_inc_sw_prod(&qp->rq);
3743 /* qp->rqe_wr_id is accessed during qedr_poll_cq, as
3744 * soon as we give the doorbell, we could get a completion
3745 * for this wr, therefore we need to make sure that the
3746 * memory is updated before giving the doorbell.
3747 * During qedr_poll_cq, rmb is called before accessing the
3748 * cqe. This covers for the smp_rmb as well.
3752 qp->rq.db_data.data.value++;
3754 writel(qp->rq.db_data.raw, qp->rq.db);
3756 /* Make sure write sticks */
3759 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3760 writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3761 mmiowb(); /* for second doorbell */
3767 spin_unlock_irqrestore(&qp->q_lock, flags);
3772 static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3774 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3776 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3780 static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3782 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3785 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3786 resp_cqe->qp_handle.lo,
3791 static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3793 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3795 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3798 /* Return latest CQE (needs processing) */
3799 static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3801 return cq->latest_cqe;
3804 /* For FMRs we need to increment the completed counter used by the FMR
3805 * algorithm that determines whether a PBL can be freed or not.
3806 * We need to do this whether or not the work request was signaled. For
3807 * this purpose we call this function from the condition that checks if a WR
3808 * should be skipped, to make sure we don't miss it (possibly this FMR
3809 * operation was not signaled)
3811 static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3813 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3814 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3817 static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3818 struct qedr_cq *cq, int num_entries,
3819 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3824 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3825 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3826 qedr_chk_if_fmr(qp);
3832 wc->status = status;
3835 wc->src_qp = qp->id;
3838 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3839 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3841 switch (wc->opcode) {
3842 case IB_WC_RDMA_WRITE:
3843 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3845 case IB_WC_COMP_SWAP:
3846 case IB_WC_FETCH_ADD:
3850 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3852 case IB_WC_RDMA_READ:
3854 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3864 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3865 qed_chain_consume(&qp->sq.pbl);
3866 qedr_inc_sw_cons(&qp->sq);
3872 static int qedr_poll_cq_req(struct qedr_dev *dev,
3873 struct qedr_qp *qp, struct qedr_cq *cq,
3874 int num_entries, struct ib_wc *wc,
3875 struct rdma_cqe_requester *req)
3879 switch (req->status) {
3880 case RDMA_CQE_REQ_STS_OK:
3881 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3884 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
3885 if (qp->state != QED_ROCE_QP_STATE_ERR)
3886 DP_DEBUG(dev, QEDR_MSG_CQ,
3887 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3888 cq->icid, qp->icid);
3889 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3890 IB_WC_WR_FLUSH_ERR, 1);
3893 /* process all WQEs before the consumer */
3894 qp->state = QED_ROCE_QP_STATE_ERR;
3895 cnt = process_req(dev, qp, cq, num_entries, wc,
3896 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3898 /* if we have extra WC fill it with actual error info */
3899 if (cnt < num_entries) {
3900 enum ib_wc_status wc_status;
3902 switch (req->status) {
3903 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3905 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3906 cq->icid, qp->icid);
3907 wc_status = IB_WC_BAD_RESP_ERR;
3909 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3911 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3912 cq->icid, qp->icid);
3913 wc_status = IB_WC_LOC_LEN_ERR;
3915 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3917 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3918 cq->icid, qp->icid);
3919 wc_status = IB_WC_LOC_QP_OP_ERR;
3921 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3923 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3924 cq->icid, qp->icid);
3925 wc_status = IB_WC_LOC_PROT_ERR;
3927 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3929 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3930 cq->icid, qp->icid);
3931 wc_status = IB_WC_MW_BIND_ERR;
3933 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3935 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3936 cq->icid, qp->icid);
3937 wc_status = IB_WC_REM_INV_REQ_ERR;
3939 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3941 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3942 cq->icid, qp->icid);
3943 wc_status = IB_WC_REM_ACCESS_ERR;
3945 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3947 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3948 cq->icid, qp->icid);
3949 wc_status = IB_WC_REM_OP_ERR;
3951 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3953 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3954 cq->icid, qp->icid);
3955 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3957 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3959 "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3960 cq->icid, qp->icid);
3961 wc_status = IB_WC_RETRY_EXC_ERR;
3965 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3966 cq->icid, qp->icid);
3967 wc_status = IB_WC_GENERAL_ERR;
3969 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3977 static inline int qedr_cqe_resp_status_to_ib(u8 status)
3980 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3981 return IB_WC_LOC_ACCESS_ERR;
3982 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3983 return IB_WC_LOC_LEN_ERR;
3984 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3985 return IB_WC_LOC_QP_OP_ERR;
3986 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3987 return IB_WC_LOC_PROT_ERR;
3988 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3989 return IB_WC_MW_BIND_ERR;
3990 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3991 return IB_WC_REM_INV_RD_REQ_ERR;
3992 case RDMA_CQE_RESP_STS_OK:
3993 return IB_WC_SUCCESS;
3995 return IB_WC_GENERAL_ERR;
3999 static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
4002 wc->status = IB_WC_SUCCESS;
4003 wc->byte_len = le32_to_cpu(resp->length);
4005 if (resp->flags & QEDR_RESP_IMM) {
4006 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
4007 wc->wc_flags |= IB_WC_WITH_IMM;
4009 if (resp->flags & QEDR_RESP_RDMA)
4010 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
4012 if (resp->flags & QEDR_RESP_INV)
4015 } else if (resp->flags & QEDR_RESP_INV) {
4016 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
4017 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4019 if (resp->flags & QEDR_RESP_RDMA)
4022 } else if (resp->flags & QEDR_RESP_RDMA) {
4029 static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4030 struct qedr_cq *cq, struct ib_wc *wc,
4031 struct rdma_cqe_responder *resp, u64 wr_id)
4033 /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
4034 wc->opcode = IB_WC_RECV;
4037 if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
4038 if (qedr_set_ok_cqe_resp_wc(resp, wc))
4040 "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
4041 cq, cq->icid, resp->flags);
4044 wc->status = qedr_cqe_resp_status_to_ib(resp->status);
4045 if (wc->status == IB_WC_GENERAL_ERR)
4047 "CQ %p (icid=%d) contains an invalid CQE status %d\n",
4048 cq, cq->icid, resp->status);
4051 /* Fill the rest of the WC */
4053 wc->src_qp = qp->id;
4058 static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4059 struct qedr_cq *cq, struct ib_wc *wc,
4060 struct rdma_cqe_responder *resp)
4062 struct qedr_srq *srq = qp->srq;
4065 wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
4066 le32_to_cpu(resp->srq_wr_id.lo), u64);
4068 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4069 wc->status = IB_WC_WR_FLUSH_ERR;
4073 wc->src_qp = qp->id;
4077 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
4079 srq->hw_srq.wr_cons_cnt++;
4083 static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4084 struct qedr_cq *cq, struct ib_wc *wc,
4085 struct rdma_cqe_responder *resp)
4087 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4089 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
4091 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4092 qed_chain_consume(&qp->rq.pbl);
4093 qedr_inc_sw_cons(&qp->rq);
4098 static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
4099 int num_entries, struct ib_wc *wc, u16 hw_cons)
4103 while (num_entries && qp->rq.wqe_cons != hw_cons) {
4105 wc->status = IB_WC_WR_FLUSH_ERR;
4108 wc->src_qp = qp->id;
4110 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4115 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4116 qed_chain_consume(&qp->rq.pbl);
4117 qedr_inc_sw_cons(&qp->rq);
4123 static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4124 struct rdma_cqe_responder *resp, int *update)
4126 if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
4132 static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4133 struct qedr_cq *cq, int num_entries,
4135 struct rdma_cqe_responder *resp)
4139 cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
4145 static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
4146 struct qedr_cq *cq, int num_entries,
4147 struct ib_wc *wc, struct rdma_cqe_responder *resp,
4152 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4153 cnt = process_resp_flush(qp, cq, num_entries, wc,
4154 resp->rq_cons_or_srq_id);
4155 try_consume_resp_cqe(cq, qp, resp, update);
4157 cnt = process_resp_one(dev, qp, cq, wc, resp);
4165 static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4166 struct rdma_cqe_requester *req, int *update)
4168 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
4174 int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
4176 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
4177 struct qedr_cq *cq = get_qedr_cq(ibcq);
4178 union rdma_cqe *cqe;
4179 u32 old_cons, new_cons;
4180 unsigned long flags;
4184 if (cq->destroyed) {
4186 "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
4191 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
4192 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
4194 spin_lock_irqsave(&cq->cq_lock, flags);
4195 cqe = cq->latest_cqe;
4196 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4197 while (num_entries && is_valid_cqe(cq, cqe)) {
4201 /* prevent speculative reads of any field of CQE */
4204 qp = cqe_get_qp(cqe);
4206 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
4212 switch (cqe_get_type(cqe)) {
4213 case RDMA_CQE_TYPE_REQUESTER:
4214 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
4216 try_consume_req_cqe(cq, qp, &cqe->req, &update);
4218 case RDMA_CQE_TYPE_RESPONDER_RQ:
4219 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
4220 &cqe->resp, &update);
4222 case RDMA_CQE_TYPE_RESPONDER_SRQ:
4223 cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
4227 case RDMA_CQE_TYPE_INVALID:
4229 DP_ERR(dev, "Error: invalid CQE type = %d\n",
4238 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4240 cq->cq_cons += new_cons - old_cons;
4243 /* doorbell notifies about the latest VALID entry,
4244 * but the chain already points to the next INVALID one
4246 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
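/* Illustrative: if this poll consumed entries and cq_cons advanced from 7
 * to 10, the doorbell reports index 9 -- the last entry actually consumed --
 * while the chain cursor already points at the next, not-yet-valid CQE.
 */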
4248 spin_unlock_irqrestore(&cq->cq_lock, flags);
4252 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
4254 const struct ib_wc *in_wc,
4255 const struct ib_grh *in_grh,
4256 const struct ib_mad_hdr *mad_hdr,
4257 size_t in_mad_size, struct ib_mad_hdr *out_mad,
4258 size_t *out_mad_size, u16 *out_mad_pkey_index)
4260 struct qedr_dev *dev = get_qedr_dev(ibdev);
4262 DP_DEBUG(dev, QEDR_MSG_GSI,
4263 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
4264 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
4265 mad_hdr->class_specific, mad_hdr->class_version,
4266 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
4267 return IB_MAD_RESULT_SUCCESS;