/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <linux/iommu.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>

#include <linux/qed/common_hsi.h>
#include "qedr_hsi_rdma.h"
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"
#include "qedr_iw_cm.h"

#define QEDR_SRQ_WQE_ELEM_SIZE	sizeof(union rdma_srq_elm)
#define RDMA_MAX_SGE_PER_SRQ	(4)
#define RDMA_MAX_SRQ_WQE_SIZE	(RDMA_MAX_SGE_PER_SRQ + 1)

#define DB_ADDR_SHIFT(addr)	((addr) << DB_PWM_ADDR_OFFSET_SHIFT)

enum {
	QEDR_USER_MMAP_IO_WC = 0,
	QEDR_USER_MMAP_PHYS_PAGE,
};

static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
					size_t len)
{
	size_t min_len = min_t(size_t, len, udata->outlen);

	return ib_copy_to_udata(udata, src, min_len);
}

int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
	if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
		return -EINVAL;

	*pkey = QEDR_ROCE_PKEY_DEFAULT;
	return 0;
}

int qedr_iw_query_gid(struct ib_device *ibdev, u32 port,
		      int index, union ib_gid *sgid)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	memset(sgid->raw, 0, sizeof(sgid->raw));
	ether_addr_copy(sgid->raw, dev->ndev->dev_addr);

	DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
		 sgid->global.interface_id, sgid->global.subnet_prefix);

	return 0;
}

int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
	struct qedr_device_attr *qattr = &dev->attr;
	struct qedr_srq *srq = get_qedr_srq(ibsrq);

	srq_attr->srq_limit = srq->srq_limit;
	srq_attr->max_wr = qattr->max_srq_wr;
	srq_attr->max_sge = qattr->max_sge;

	return 0;
}
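
/* Fill in the ib_device_attr capability table from the attributes the
 * qed core reported at probe time (dev->attr) plus driver constants.
 * Called by the IB core on behalf of ibv_query_device() and friends.
 */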
int qedr_query_device(struct ib_device *ibdev,
		      struct ib_device_attr *attr, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_device_attr *qattr = &dev->attr;

	if (!dev->rdma_ctx) {
		DP_ERR(dev,
		       "qedr_query_device called with invalid params rdma_ctx=%p\n",
		       dev->rdma_ctx);
		return -EINVAL;
	}

	memset(attr, 0, sizeof(*attr));

	attr->fw_ver = qattr->fw_ver;
	attr->sys_image_guid = qattr->sys_image_guid;
	attr->max_mr_size = qattr->max_mr_size;
	attr->page_size_cap = qattr->page_size_caps;
	attr->vendor_id = qattr->vendor_id;
	attr->vendor_part_id = qattr->vendor_part_id;
	attr->hw_ver = qattr->hw_ver;
	attr->max_qp = qattr->max_qp;
	attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
	    IB_DEVICE_RC_RNR_NAK_GEN |
	    IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;

	if (!rdma_protocol_iwarp(&dev->ibdev, 1))
		attr->device_cap_flags |= IB_DEVICE_XRC;
	attr->max_send_sge = qattr->max_sge;
	attr->max_recv_sge = qattr->max_sge;
	attr->max_sge_rd = qattr->max_sge;
	attr->max_cq = qattr->max_cq;
	attr->max_cqe = qattr->max_cqe;
	attr->max_mr = qattr->max_mr;
	attr->max_mw = qattr->max_mw;
	attr->max_pd = qattr->max_pd;
	attr->atomic_cap = dev->atomic_cap;
	attr->max_qp_init_rd_atom =
	    1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
	attr->max_qp_rd_atom =
	    min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
		attr->max_qp_init_rd_atom);

	attr->max_srq = qattr->max_srq;
	attr->max_srq_sge = qattr->max_srq_sge;
	attr->max_srq_wr = qattr->max_srq_wr;

	attr->local_ca_ack_delay = qattr->dev_ack_delay;
	attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
	attr->max_pkeys = qattr->max_pkey;
	attr->max_ah = qattr->max_ah;

	return 0;
}
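
/* Map an Ethernet link speed in Mbps to the nearest IB speed/width
 * pair (e.g. 40000 Mbps is reported as QDR x4). Unrecognized speeds
 * fall back to SDR x1.
 */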
static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
					    u8 *ib_width)
{
	switch (speed) {
	case 1000:
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;
	case 10000:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 20000:
		*ib_speed = IB_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 25000:
		*ib_speed = IB_SPEED_EDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 40000:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 50000:
		*ib_speed = IB_SPEED_HDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 100000:
		*ib_speed = IB_SPEED_EDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

int qedr_query_port(struct ib_device *ibdev, u32 port,
		    struct ib_port_attr *attr)
{
	struct qedr_dev *dev;
	struct qed_rdma_port *rdma_port;

	dev = get_qedr_dev(ibdev);

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "rdma_ctx is NULL\n");
		return -EINVAL;
	}

	rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);

	/* *attr being zeroed by the caller, avoid zeroing it here */
	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
		attr->state = IB_PORT_ACTIVE;
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		attr->state = IB_PORT_DOWN;
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}

	attr->max_mtu = IB_MTU_4096;
	attr->lid = 0;
	attr->lmc = 0;
	attr->sm_lid = 0;
	attr->sm_sl = 0;
	attr->ip_gids = true;
	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu);
		attr->gid_tbl_len = 1;
	} else {
		attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
		attr->gid_tbl_len = QEDR_MAX_SGID;
		attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
	}
	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
	attr->qkey_viol_cntr = 0;
	get_link_speed_and_width(rdma_port->link_speed,
				 &attr->active_speed, &attr->active_width);
	attr->max_msg_sz = rdma_port->max_msg_size;
	attr->max_vl_num = 4;

	return 0;
}
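
/* Per-process context setup: reserve a DPI (doorbell page) for the new
 * application, publish it as an mmap-able entry and report queue limits
 * and DPM capabilities back to the user library.
 */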
int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	int rc;
	struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
	struct qedr_alloc_ucontext_resp uresp = {};
	struct qedr_alloc_ucontext_req ureq = {};
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_add_user_out_params oparams;
	struct qedr_user_mmap_entry *entry;

	if (!udata)
		return -EFAULT;

	rc = ib_copy_from_udata(&ureq, udata,
				min(sizeof(ureq), udata->inlen));
	if (rc) {
		DP_ERR(dev, "Problem copying data from user space\n");
		return -EFAULT;
	}
	ctx->edpm_mode = !!(ureq.context_flags &
			    QEDR_ALLOC_UCTX_EDPM_MODE);
	ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
	if (rc) {
		DP_ERR(dev,
		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
		       rc);
		return rc;
	}

	ctx->dpi = oparams.dpi;
	ctx->dpi_addr = oparams.dpi_addr;
	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
	ctx->dpi_size = oparams.dpi_size;
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		rc = -ENOMEM;
		goto err;
	}

	entry->io_address = ctx->dpi_phys_addr;
	entry->length = ctx->dpi_size;
	entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
	entry->dpi = ctx->dpi;
	entry->dev = dev;
	rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
					 ctx->dpi_size);
	if (rc) {
		kfree(entry);
		goto err;
	}
	ctx->db_mmap_entry = &entry->rdma_entry;

	if (!dev->user_dpm_enabled)
		uresp.dpm_flags = 0;
	else if (rdma_protocol_iwarp(&dev->ibdev, 1))
		uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY;
	else
		uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED |
				  QEDR_DPM_TYPE_ROCE_LEGACY |
				  QEDR_DPM_TYPE_ROCE_EDPM_MODE;

	if (ureq.context_flags & QEDR_SUPPORT_DPM_SIZES) {
		uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
		uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
		uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
		uresp.edpm_limit_size = QEDR_EDPM_MAX_SIZE;
	}

	uresp.wids_enabled = 1;
	uresp.wid_count = oparams.wid_count;
	uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
	uresp.db_size = ctx->dpi_size;
	uresp.max_send_wr = dev->attr.max_sqe;
	uresp.max_recv_wr = dev->attr.max_rqe;
	uresp.max_srq_wr = dev->attr.max_srq_wr;
	uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
	uresp.max_cqes = QEDR_MAX_CQES;

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		goto err;

	ctx->dev = dev;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
		 &ctx->ibucontext);
	return 0;

err:
	if (!ctx->db_mmap_entry)
		dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
	else
		rdma_user_mmap_entry_remove(ctx->db_mmap_entry);

	return rc;
}

void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);

	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
		 uctx);

	rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
}

void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
	struct qedr_dev *dev = entry->dev;

	if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
		free_page((unsigned long)entry->address);
	else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
		dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);

	kfree(entry);
}
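
/* mmap handler for the entries inserted above: doorbell BAR pages are
 * mapped write-combined, while doorbell-recovery pages are ordinary
 * kernel pages inserted into the user VMA.
 */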
int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
{
	struct ib_device *dev = ucontext->device;
	size_t length = vma->vm_end - vma->vm_start;
	struct rdma_user_mmap_entry *rdma_entry;
	struct qedr_user_mmap_entry *entry;
	int rc = 0;
	u64 pfn;

	ibdev_dbg(dev,
		  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
		  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);

	rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
	if (!rdma_entry) {
		ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
			  vma->vm_pgoff);
		return -EINVAL;
	}
	entry = get_qedr_mmap_entry(rdma_entry);
	ibdev_dbg(dev,
		  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
		  entry->io_address, length, entry->mmap_flag);

	switch (entry->mmap_flag) {
	case QEDR_USER_MMAP_IO_WC:
		pfn = entry->io_address >> PAGE_SHIFT;
		rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
				       pgprot_writecombine(vma->vm_page_prot),
				       rdma_entry);
		break;
	case QEDR_USER_MMAP_PHYS_PAGE:
		rc = vm_insert_page(vma, vma->vm_start,
				    virt_to_page(entry->address));
		break;
	default:
		rc = -EINVAL;
	}

	if (rc)
		ibdev_dbg(dev,
			  "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
			  entry->io_address, length, entry->mmap_flag, rc);

	rdma_user_mmap_entry_put(rdma_entry);
	return rc;
}

int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	u16 pd_id;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
		 udata ? "User Lib" : "Kernel");

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "invalid RDMA context\n");
		return -EINVAL;
	}

	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
	if (rc)
		return rc;

	pd->pd_id = pd_id;

	if (udata) {
		struct qedr_alloc_pd_uresp uresp = {
			.pd_id = pd_id,
		};
		struct qedr_ucontext *context = rdma_udata_to_drv_context(
			udata, struct qedr_ucontext, ibucontext);

		rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rc) {
			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
			return rc;
		}

		pd->uctx = context;
		pd->uctx->pd = pd;
	}

	return 0;
}

int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);

	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);

	return 0;
}

int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
	struct qedr_xrcd *xrcd = get_qedr_xrcd(ibxrcd);

	return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id);
}

int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
	u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id;

	dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id);
	return 0;
}

static void qedr_free_pbl(struct qedr_dev *dev,
			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
{
	struct pci_dev *pdev = dev->pdev;
	int i;

	for (i = 0; i < pbl_info->num_pbls; i++) {
		if (!pbl[i].va)
			continue;
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl[i].va, pbl[i].pa);
	}

	kfree(pbl);
}
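
/* Page buffer lists (PBLs) describe queue/MR memory to the firmware as
 * pages of 64-bit page addresses (PBEs). When more PBEs are needed than
 * fit in one PBL page, a two-layer layout is used: layer 0 holds the
 * physical addresses of the remaining PBL pages.
 */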
#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)

#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)

static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
					   struct qedr_pbl_info *pbl_info,
					   gfp_t flags)
{
	struct pci_dev *pdev = dev->pdev;
	struct qedr_pbl *pbl_table;
	dma_addr_t *pbl_main_tbl;
	dma_addr_t pa;
	void *va;
	int i;

	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
	if (!pbl_table)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < pbl_info->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
					flags);
		if (!va)
			goto err;

		pbl_table[i].va = va;
		pbl_table[i].pa = pa;
	}

	/* Two-Layer PBLs, if we have more than one pbl we need to initialize
	 * the first one with physical pointers to all of the rest
	 */
	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
	for (i = 0; i < pbl_info->num_pbls - 1; i++)
		pbl_main_tbl[i] = pbl_table[i + 1].pa;

	return pbl_table;

err:
	for (i--; i >= 0; i--) {
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl_table[i].va, pbl_table[i].pa);
		/* Clear the entry so qedr_free_pbl() below skips it and
		 * doesn't free the same buffer a second time.
		 */
		pbl_table[i].va = NULL;
	}

	qedr_free_pbl(dev, pbl_info, pbl_table);

	return ERR_PTR(-ENOMEM);
}
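
/* Work out the PBL geometry (page size, page count, one or two layers)
 * needed to map num_pbes page entries, within the firmware's PBL
 * page-size limits.
 */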
static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
				struct qedr_pbl_info *pbl_info,
				u32 num_pbes, int two_layer_capable)
{
	u32 pbl_capacity;
	u32 pbl_size;
	u32 num_pbls;

	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
		if (num_pbes > MAX_PBES_TWO_LAYER) {
			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
			       num_pbes);
			return -EINVAL;
		}

		/* calculate required pbl page size */
		pbl_size = MIN_FW_PBL_PAGE_SIZE;
		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
			       NUM_PBES_ON_PAGE(pbl_size);

		while (pbl_capacity < num_pbes) {
			pbl_size *= 2;
			pbl_capacity = pbl_size / sizeof(u64);
			pbl_capacity = pbl_capacity * pbl_capacity;
		}

		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
		num_pbls++;	/* One for the layer0 ( points to the pbls) */
		pbl_info->two_layered = true;
	} else {
		/* One layered PBL */
		num_pbls = 1;
		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
				 roundup_pow_of_two((num_pbes * sizeof(u64))));
		pbl_info->two_layered = false;
	}

	pbl_info->num_pbls = num_pbls;
	pbl_info->pbl_size = pbl_size;
	pbl_info->num_pbes = num_pbes;

	DP_DEBUG(dev, QEDR_MSG_MR,
		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);

	return 0;
}

static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
			       struct qedr_pbl *pbl,
			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
{
	int pbe_cnt, total_num_pbes = 0;
	struct qedr_pbl *pbl_tbl;
	struct ib_block_iter biter;
	struct regpair *pbe;

	if (!pbl_info->num_pbes)
		return;

	/* If we have a two layered pbl, the first pbl points to the rest
	 * of the pbls and the first entry lays on the second pbl in the table
	 */
	if (pbl_info->two_layered)
		pbl_tbl = &pbl[1];
	else
		pbl_tbl = pbl;

	pbe = (struct regpair *)pbl_tbl->va;
	if (!pbe) {
		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
		return;
	}

	pbe_cnt = 0;

	rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {
		u64 pg_addr = rdma_block_iter_dma_address(&biter);

		pbe->lo = cpu_to_le32(pg_addr);
		pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));

		pbe_cnt++;
		total_num_pbes++;
		pbe++;

		if (total_num_pbes == pbl_info->num_pbes)
			return;

		/* If the given pbl is full storing the pbes, move to next pbl.
		 */
		if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
			pbl_tbl++;
			pbe = (struct regpair *)pbl_tbl->va;
			pbe_cnt = 0;
		}
	}
}

static int qedr_db_recovery_add(struct qedr_dev *dev,
				void __iomem *db_addr,
				void *db_data,
				enum qed_db_rec_width db_width,
				enum qed_db_rec_space db_space)
{
	if (!db_data) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
		return 0;
	}

	return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
						 db_width, db_space);
}

static void qedr_db_recovery_del(struct qedr_dev *dev,
				 void __iomem *db_addr,
				 void *db_data)
{
	if (!db_data) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
		return;
	}

	/* Ignore return code as there is not much we can do about it. Error
	 * log will be printed inside.
	 */
	dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
}

static int qedr_copy_cq_uresp(struct qedr_dev *dev,
			      struct qedr_cq *cq, struct ib_udata *udata,
			      u32 db_offset)
{
	struct qedr_create_cq_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_offset = db_offset;
	uresp.icid = cq->icid;
	if (cq->q.db_mmap_entry)
		uresp.db_rec_addr =
			rdma_user_mmap_get_offset(cq->q.db_mmap_entry);

	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);

	return rc;
}

static void consume_cqe(struct qedr_cq *cq)
{
	if (cq->latest_cqe == cq->toggle_cqe)
		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;

	cq->latest_cqe = qed_chain_consume(&cq->pbl);
}

static inline int qedr_align_cq_entries(int entries)
{
	u64 size, aligned_size;

	/* We allocate an extra entry that we don't report to the FW. */
	size = (entries + 1) * QEDR_CQE_SIZE;
	aligned_size = ALIGN(size, PAGE_SIZE);

	return aligned_size / QEDR_CQE_SIZE;
}
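
/* Allocate the doorbell-recovery page for a user queue and expose it
 * via mmap; userspace mirrors its last doorbell value there so the
 * driver can re-issue it during doorbell recovery.
 */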
static int qedr_init_user_db_rec(struct ib_udata *udata,
				 struct qedr_dev *dev, struct qedr_userq *q,
				 bool requires_db_rec)
{
	struct qedr_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct qedr_ucontext,
					  ibucontext);
	struct qedr_user_mmap_entry *entry;
	int rc;

	/* Aborting for non doorbell userqueue (SRQ) or non-supporting lib */
	if (requires_db_rec == 0 || !uctx->db_rec)
		return 0;

	/* Allocate a page for doorbell recovery, add to mmap */
	q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
	if (!q->db_rec_data) {
		DP_ERR(dev, "get_zeroed_page failed\n");
		return -ENOMEM;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		goto err_free_db_data;

	entry->address = q->db_rec_data;
	entry->length = PAGE_SIZE;
	entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
	rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
					 &entry->rdma_entry,
					 PAGE_SIZE);
	if (rc) {
		kfree(entry);
		goto err_free_db_data;
	}

	q->db_mmap_entry = &entry->rdma_entry;

	return 0;

err_free_db_data:
	free_page((unsigned long)q->db_rec_data);
	q->db_rec_data = NULL;
	return -ENOMEM;
}

static inline int qedr_init_user_queue(struct ib_udata *udata,
				       struct qedr_dev *dev,
				       struct qedr_userq *q, u64 buf_addr,
				       size_t buf_len, bool requires_db_rec,
				       int access,
				       int alloc_and_init)
{
	u32 fw_pages;
	int rc;

	q->buf_addr = buf_addr;
	q->buf_len = buf_len;
	q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
	if (IS_ERR(q->umem)) {
		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
		       PTR_ERR(q->umem));
		return PTR_ERR(q->umem);
	}

	fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);
	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
	if (rc)
		goto err0;

	if (alloc_and_init) {
		q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
		if (IS_ERR(q->pbl_tbl)) {
			rc = PTR_ERR(q->pbl_tbl);
			goto err0;
		}
		qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
				   FW_PAGE_SHIFT);
	} else {
		q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
		if (!q->pbl_tbl) {
			rc = -ENOMEM;
			goto err0;
		}
	}

	/* mmap the user address used to store doorbell data for recovery */
	return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);

err0:
	ib_umem_release(q->umem);
	q->umem = NULL;

	return rc;
}

static inline void qedr_init_cq_params(struct qedr_cq *cq,
				       struct qedr_ucontext *ctx,
				       struct qedr_dev *dev, int vector,
				       int chain_entries, int page_cnt,
				       u64 pbl_ptr,
				       struct qed_rdma_create_cq_in_params
				       *params)
{
	memset(params, 0, sizeof(*params));
	params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
	params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
	params->cnq_id = vector;
	params->cq_size = chain_entries - 1;
	params->dpi = (ctx) ? ctx->dpi : dev->dpi;
	params->pbl_num_pages = page_cnt;
	params->pbl_ptr = pbl_ptr;
	params->pbl_two_level = 0;
}

static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
{
	cq->db.data.agg_flags = flags;
	cq->db.data.value = cpu_to_le32(cons);
	writeq(cq->db.raw, cq->db_addr);
}

int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	unsigned long sflags;
	struct qedr_dev *dev;

	dev = get_qedr_dev(ibcq->device);

	if (cq->destroyed) {
		DP_ERR(dev,
		       "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
		       cq, cq->icid);
		return -EINVAL;
	}

	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		return 0;

	spin_lock_irqsave(&cq->cq_lock, sflags);

	cq->arm_flags = 0;

	if (flags & IB_CQ_SOLICITED)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;

	if (flags & IB_CQ_NEXT_COMP)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;

	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

	spin_unlock_irqrestore(&cq->cq_lock, sflags);

	return 0;
}
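
/* CQ creation: for user CQs the CQE buffer is a umem supplied by the
 * library and described to the firmware through a PBL; for kernel CQs
 * a qed chain is allocated instead. Both paths finish by registering
 * the CQ doorbell with the doorbell-recovery mechanism.
 */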
int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		   struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
		udata, struct qedr_ucontext, ibucontext);
	struct qed_rdma_destroy_cq_out_params destroy_oparams;
	struct qed_rdma_destroy_cq_in_params destroy_iparams;
	struct qed_chain_init_params chain_params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
		.elem_size	= sizeof(union rdma_cqe),
	};
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_create_cq_in_params params;
	struct qedr_create_cq_ureq ureq = {};
	int vector = attr->comp_vector;
	int entries = attr->cqe;
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	int chain_entries;
	u32 db_offset;
	int page_cnt;
	u64 pbl_ptr;
	u16 icid;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "create_cq: called from %s. entries=%d, vector=%d\n",
		 udata ? "User Lib" : "Kernel", entries, vector);

	if (attr->flags)
		return -EOPNOTSUPP;

	if (entries > QEDR_MAX_CQES) {
		DP_ERR(dev,
		       "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
		       entries, QEDR_MAX_CQES);
		return -EINVAL;
	}

	chain_entries = qedr_align_cq_entries(entries);
	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
	chain_params.num_elems = chain_entries;

	/* calc db offset. user will add DPI base, kernel will add db addr */
	db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);

	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
							 udata->inlen))) {
			DP_ERR(dev,
			       "create cq: problem copying data from user space\n");
			goto err0;
		}

		if (!ureq.len) {
			DP_ERR(dev,
			       "create cq: cannot create a cq with 0 entries\n");
			goto err0;
		}

		cq->cq_type = QEDR_CQ_TYPE_USER;

		rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
					  ureq.len, true, IB_ACCESS_LOCAL_WRITE,
					  1);
		if (rc)
			goto err0;

		pbl_ptr = cq->q.pbl_tbl->pa;
		page_cnt = cq->q.pbl_info.num_pbes;

		cq->ibcq.cqe = chain_entries;
		cq->q.db_addr = ctx->dpi_addr + db_offset;
	} else {
		cq->cq_type = QEDR_CQ_TYPE_KERNEL;

		rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
						   &chain_params);
		if (rc)
			goto err0;

		page_cnt = qed_chain_get_page_cnt(&cq->pbl);
		pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
		cq->ibcq.cqe = cq->pbl.capacity;
	}

	qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
			    pbl_ptr, &params);

	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
	if (rc)
		goto err1;

	cq->icid = icid;
	cq->sig = QEDR_CQ_MAGIC_NUMBER;
	spin_lock_init(&cq->cq_lock);

	if (udata) {
		rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
		if (rc)
			goto err2;

		rc = qedr_db_recovery_add(dev, cq->q.db_addr,
					  &cq->q.db_rec_data->db_data,
					  DB_REC_WIDTH_64B,
					  DB_REC_USER);
		if (rc)
			goto err2;
	} else {
		/* Generate doorbell address. */
		cq->db.data.icid = cq->icid;
		cq->db_addr = dev->db_addr + db_offset;
		cq->db.data.params = DB_AGG_CMD_MAX <<
		    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;

		/* point to the very last element, passing it we will toggle */
		cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
		cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
		cq->latest_cqe = NULL;
		consume_cqe(cq);
		cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);

		rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
					  DB_REC_WIDTH_64B, DB_REC_KERNEL);
		if (rc)
			goto err2;
	}

	DP_DEBUG(dev, QEDR_MSG_CQ,
		 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
		 cq->icid, cq, params.cq_size);

	return 0;

err2:
	destroy_iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
				  &destroy_oparams);
err1:
	if (udata) {
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
		ib_umem_release(cq->q.umem);
		if (cq->q.db_mmap_entry)
			rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
	} else {
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
	}
err0:
	return -EINVAL;
}
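
/* Iteration count and per-iteration delay (microseconds in the busy
 * poll below, milliseconds in the sleeping poll) used while waiting
 * for outstanding CNQ notifications on CQ destroy.
 */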
#define QEDR_DESTROY_CQ_MAX_ITERATIONS		(10)
#define QEDR_DESTROY_CQ_ITER_DURATION		(10)

int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qed_rdma_destroy_cq_out_params oparams;
	struct qed_rdma_destroy_cq_in_params iparams;
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	int iter;

	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);

	cq->destroyed = 1;

	/* GSIs CQs are handled by driver, so they don't exist in the FW */
	if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
		qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
		return 0;
	}

	iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
	dev->ops->common->chain_free(dev->cdev, &cq->pbl);

	if (udata) {
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
		ib_umem_release(cq->q.umem);

		if (cq->q.db_rec_data) {
			qedr_db_recovery_del(dev, cq->q.db_addr,
					     &cq->q.db_rec_data->db_data);
			rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
		}
	} else {
		qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
	}

	/* We don't want the IRQ handler to handle a non-existing CQ so we
	 * wait until all CNQ interrupts, if any, are received. This will always
	 * happen and will always happen very fast. If not, then a serious error
	 * has occurred. That is why we can use a long delay.
	 * We spin for a short time so we don't lose time on context switching
	 * in case all the completions are handled in that span. Otherwise
	 * we sleep for a while and check again. Since the CNQ may be
	 * associated with (only) the current CPU we use msleep to allow the
	 * current CPU to be freed.
	 * The CNQ notification is increased in qedr_irq_handler().
	 */
	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
		udelay(QEDR_DESTROY_CQ_ITER_DURATION);
		iter--;
	}

	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
		msleep(QEDR_DESTROY_CQ_ITER_DURATION);
		iter--;
	}

	/* Note that we don't need to have explicit code to wait for the
	 * completion of the event handler because it is invoked from the EQ.
	 * Since the destroy CQ ramrod has also been received on the EQ we can
	 * be certain that there's no event handler in process.
	 */
	return 0;
}

static inline int get_gid_info_from_table(struct ib_qp *ibqp,
					  struct ib_qp_attr *attr,
					  int attr_mask,
					  struct qed_rdma_modify_qp_in_params
					  *qp_params)
{
	const struct ib_gid_attr *gid_attr;
	enum rdma_network_type nw_type;
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	u32 ipv4_addr;
	int ret;
	int i;

	gid_attr = grh->sgid_attr;
	ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
	if (ret)
		return ret;

	nw_type = rdma_gid_attr_network_type(gid_attr);
	switch (nw_type) {
	case RDMA_NETWORK_IPV6:
		memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
		       sizeof(qp_params->sgid));
		memcpy(&qp_params->dgid.bytes[0],
		       &grh->dgid,
		       sizeof(qp_params->dgid));
		qp_params->roce_mode = ROCE_V2_IPV6;
		SET_FIELD(qp_params->modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
		break;
	case RDMA_NETWORK_ROCE_V1:
		memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
		       sizeof(qp_params->sgid));
		memcpy(&qp_params->dgid.bytes[0],
		       &grh->dgid,
		       sizeof(qp_params->dgid));
		qp_params->roce_mode = ROCE_V1;
		break;
	case RDMA_NETWORK_IPV4:
		memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
		memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
		ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
		qp_params->sgid.ipv4_addr = ipv4_addr;
		ipv4_addr =
		    qedr_get_ipv4_from_gid(grh->dgid.raw);
		qp_params->dgid.ipv4_addr = ipv4_addr;
		SET_FIELD(qp_params->modify_flags,
			  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
		qp_params->roce_mode = ROCE_V2_IPV4;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < 4; i++) {
		qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
		qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
	}

	if (qp_params->vlan_id >= VLAN_CFI_MASK)
		qp_params->vlan_id = 0;

	return 0;
}

static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *udata)
{
	struct qedr_device_attr *qattr = &dev->attr;

	/* QP0... attrs->qp_type == IB_QPT_GSI */
	if (attrs->qp_type != IB_QPT_RC &&
	    attrs->qp_type != IB_QPT_GSI &&
	    attrs->qp_type != IB_QPT_XRC_INI &&
	    attrs->qp_type != IB_QPT_XRC_TGT) {
		DP_DEBUG(dev, QEDR_MSG_QP,
			 "create qp: unsupported qp type=0x%x requested\n",
			 attrs->qp_type);
		return -EOPNOTSUPP;
	}

	if (attrs->cap.max_send_wr > qattr->max_sqe) {
		DP_ERR(dev,
		       "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
		       attrs->cap.max_send_wr, qattr->max_sqe);
		return -EINVAL;
	}

	if (attrs->cap.max_inline_data > qattr->max_inline) {
		DP_ERR(dev,
		       "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
		       attrs->cap.max_inline_data, qattr->max_inline);
		return -EINVAL;
	}

	if (attrs->cap.max_send_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
		       attrs->cap.max_send_sge, qattr->max_sge);
		return -EINVAL;
	}

	if (attrs->cap.max_recv_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
		       attrs->cap.max_recv_sge, qattr->max_sge);
		return -EINVAL;
	}

	/* verify consumer QPs are not trying to use GSI QP's CQ.
	 * TGT QP isn't associated with RQ/SQ
	 */
	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
	    (attrs->qp_type != IB_QPT_XRC_TGT) &&
	    (attrs->qp_type != IB_QPT_XRC_INI)) {
		struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
		struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);

		if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) ||
		    (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) {
			DP_ERR(dev,
			       "create qp: consumer QP cannot use GSI CQs.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int qedr_copy_srq_uresp(struct qedr_dev *dev,
			       struct qedr_srq *srq, struct ib_udata *udata)
{
	struct qedr_create_srq_uresp uresp = {};
	int rc;

	uresp.srq_id = srq->srq_id;

	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev, "create srq: problem copying data to user space\n");

	return rc;
}

static void qedr_copy_rq_uresp(struct qedr_dev *dev,
			       struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
{
	/* iWARP requires two doorbells per RQ. */
	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		uresp->rq_db_offset =
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
		uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
	} else {
		uresp->rq_db_offset =
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	}

	uresp->rq_icid = qp->icid;
	if (qp->urq.db_mmap_entry)
		uresp->rq_db_rec_addr =
			rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
}

static void qedr_copy_sq_uresp(struct qedr_dev *dev,
			       struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
{
	uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);

	/* iWARP uses the same cid for rq and sq */
	if (rdma_protocol_iwarp(&dev->ibdev, 1))
		uresp->sq_icid = qp->icid;
	else
		uresp->sq_icid = qp->icid + 1;

	if (qp->usq.db_mmap_entry)
		uresp->sq_db_rec_addr =
			rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
}

static int qedr_copy_qp_uresp(struct qedr_dev *dev,
			      struct qedr_qp *qp, struct ib_udata *udata,
			      struct qedr_create_qp_uresp *uresp)
{
	int rc;

	memset(uresp, 0, sizeof(*uresp));

	if (qedr_qp_has_sq(qp))
		qedr_copy_sq_uresp(dev, uresp, qp);

	if (qedr_qp_has_rq(qp))
		qedr_copy_rq_uresp(dev, uresp, qp);

	uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
	uresp->qp_id = qp->qp_id;

	rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
	if (rc)
		DP_ERR(dev,
		       "create qp: failed a copy to user space with qp icid=0x%x.\n",
		       qp->icid);

	return rc;
}

static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
{
	qed_chain_reset(&qph->pbl);
	qph->prod = 0;
	qph->cons = 0;
	qph->wqe_cons = 0;
	qph->db_data.data.value = cpu_to_le16(0);
}

static void qedr_set_common_qp_params(struct qedr_dev *dev,
				      struct qedr_qp *qp,
				      struct qedr_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	spin_lock_init(&qp->q_lock);
	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		kref_init(&qp->refcnt);
		init_completion(&qp->iwarp_cm_comp);
		init_completion(&qp->qp_rel_comp);
	}

	qp->pd = pd;
	qp->qp_type = attrs->qp_type;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->state = QED_ROCE_QP_STATE_RESET;

	qp->prev_wqe_size = 0;

	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
	qp->dev = dev;
	if (qedr_qp_has_sq(qp)) {
		qedr_reset_qp_hwq_info(&qp->sq);
		qp->sq.max_sges = attrs->cap.max_send_sge;
		qp->sq_cq = get_qedr_cq(attrs->send_cq);
		DP_DEBUG(dev, QEDR_MSG_QP,
			 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
			 qp->sq.max_sges, qp->sq_cq->icid);
	}

	if (attrs->srq)
		qp->srq = get_qedr_srq(attrs->srq);

	if (qedr_qp_has_rq(qp)) {
		qedr_reset_qp_hwq_info(&qp->rq);
		qp->rq_cq = get_qedr_cq(attrs->recv_cq);
		qp->rq.max_sges = attrs->cap.max_recv_sge;
		DP_DEBUG(dev, QEDR_MSG_QP,
			 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
			 qp->rq.max_sges, qp->rq_cq->icid);
	}

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
		 pd->pd_id, qp->qp_type, qp->max_inline_data,
		 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
		 qp->sq.max_sges, qp->sq_cq->icid);
}

static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
	int rc = 0;

	if (qedr_qp_has_sq(qp)) {
		qp->sq.db = dev->db_addr +
			    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
		qp->sq.db_data.data.icid = qp->icid + 1;
		rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data,
					  DB_REC_WIDTH_32B, DB_REC_KERNEL);
		if (rc)
			return rc;
	}

	if (qedr_qp_has_rq(qp)) {
		qp->rq.db = dev->db_addr +
			    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
		qp->rq.db_data.data.icid = qp->icid;
		rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data,
					  DB_REC_WIDTH_32B, DB_REC_KERNEL);
		if (rc && qedr_qp_has_sq(qp))
			qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
	}

	return rc;
}

static int qedr_check_srq_params(struct qedr_dev *dev,
				 struct ib_srq_init_attr *attrs,
				 struct ib_udata *udata)
{
	struct qedr_device_attr *qattr = &dev->attr;

	if (attrs->attr.max_wr > qattr->max_srq_wr) {
		DP_ERR(dev,
		       "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
		       attrs->attr.max_wr, qattr->max_srq_wr);
		return -EINVAL;
	}

	if (attrs->attr.max_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
		       attrs->attr.max_sge, qattr->max_sge);
		return -EINVAL;
	}

	if (!udata && attrs->srq_type == IB_SRQT_XRC) {
		DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n");
		return -EINVAL;
	}

	return 0;
}

static void qedr_free_srq_user_params(struct qedr_srq *srq)
{
	qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
	ib_umem_release(srq->usrq.umem);
	ib_umem_release(srq->prod_umem);
}

static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
{
	struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
	struct qedr_dev *dev = srq->dev;

	dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);

	dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
			  hw_srq->virt_prod_pair_addr,
			  hw_srq->phy_prod_pair_addr);
}

static int qedr_init_srq_user_params(struct ib_udata *udata,
				     struct qedr_srq *srq,
				     struct qedr_create_srq_ureq *ureq,
				     int access)
{
	struct scatterlist *sg;
	int rc;

	rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
				  ureq->srq_len, false, access, 1);
	if (rc)
		return rc;

	srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
				     sizeof(struct rdma_srq_producers), access);
	if (IS_ERR(srq->prod_umem)) {
		qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
		ib_umem_release(srq->usrq.umem);
		DP_ERR(srq->dev,
		       "create srq: failed ib_umem_get for producer, got %ld\n",
		       PTR_ERR(srq->prod_umem));
		return PTR_ERR(srq->prod_umem);
	}

	sg = srq->prod_umem->sgt_append.sgt.sgl;
	srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);

	return 0;
}

static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
					struct qedr_dev *dev,
					struct ib_srq_init_attr *init_attr)
{
	struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
		.elem_size	= QEDR_SRQ_WQE_ELEM_SIZE,
	};
	dma_addr_t phy_prod_pair_addr;
	u32 num_elems;
	void *va;
	int rc;

	va = dma_alloc_coherent(&dev->pdev->dev,
				sizeof(struct rdma_srq_producers),
				&phy_prod_pair_addr, GFP_KERNEL);
	if (!va) {
		DP_ERR(dev,
		       "create srq: failed to allocate dma memory for producer\n");
		return -ENOMEM;
	}

	hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
	hw_srq->virt_prod_pair_addr = va;

	num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
	params.num_elems = num_elems;

	rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, &params);
	if (rc)
		goto err0;

	hw_srq->num_elems = num_elems;

	return 0;

err0:
	dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
			  va, phy_prod_pair_addr);
	return rc;
}
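
/* SRQ creation: build the receive-buffer PBL (user) or qed chain
 * (kernel) together with the producer-pair area, then create the SRQ
 * in firmware and register it in the srqs xarray for later lookup.
 */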
int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
		    struct ib_udata *udata)
{
	struct qed_rdma_destroy_srq_in_params destroy_in_params;
	struct qed_rdma_create_srq_in_params in_params = {};
	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
	struct qed_rdma_create_srq_out_params out_params;
	struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
	struct qedr_create_srq_ureq ureq = {};
	u64 pbl_base_addr, phy_prod_pair_addr;
	struct qedr_srq_hwq_info *hw_srq;
	u32 page_cnt, page_size;
	struct qedr_srq *srq = get_qedr_srq(ibsrq);
	int rc = 0;

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "create SRQ called from %s (pd %p)\n",
		 (udata) ? "User lib" : "kernel", pd);

	if (init_attr->srq_type != IB_SRQT_BASIC &&
	    init_attr->srq_type != IB_SRQT_XRC)
		return -EOPNOTSUPP;

	rc = qedr_check_srq_params(dev, init_attr, udata);
	if (rc)
		return -EINVAL;

	srq->dev = dev;
	srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
	hw_srq = &srq->hw_srq;
	spin_lock_init(&srq->lock);

	hw_srq->max_wr = init_attr->attr.max_wr;
	hw_srq->max_sges = init_attr->attr.max_sge;

	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
							 udata->inlen))) {
			DP_ERR(dev,
			       "create srq: problem copying data from user space\n");
			goto err0;
		}

		rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
		if (rc)
			goto err0;

		page_cnt = srq->usrq.pbl_info.num_pbes;
		pbl_base_addr = srq->usrq.pbl_tbl->pa;
		phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
		page_size = PAGE_SIZE;
	} else {
		struct qed_chain *pbl;

		rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
		if (rc)
			goto err0;

		pbl = &hw_srq->pbl;
		page_cnt = qed_chain_get_page_cnt(pbl);
		pbl_base_addr = qed_chain_get_pbl_phys(pbl);
		phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
		page_size = QED_CHAIN_PAGE_SIZE;
	}

	in_params.pd_id = pd->pd_id;
	in_params.pbl_base_addr = pbl_base_addr;
	in_params.prod_pair_addr = phy_prod_pair_addr;
	in_params.num_pages = page_cnt;
	in_params.page_size = page_size;

	if (srq->is_xrc) {
		struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd);
		struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq);

		in_params.is_xrc = 1;
		in_params.xrcd_id = xrcd->xrcd_id;
		in_params.cq_cid = cq->icid;
	}

	rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
	if (rc)
		goto err1;

	srq->srq_id = out_params.srq_id;

	if (udata) {
		rc = qedr_copy_srq_uresp(dev, srq, udata);
		if (rc)
			goto err2;
	}

	rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
	if (rc)
		goto err2;

	DP_DEBUG(dev, QEDR_MSG_SRQ,
		 "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
	return 0;

err2:
	destroy_in_params.srq_id = srq->srq_id;

	dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
err1:
	if (udata)
		qedr_free_srq_user_params(srq);
	else
		qedr_free_srq_kernel_params(srq);
err0:
	return -EFAULT;
}

int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct qed_rdma_destroy_srq_in_params in_params = {};
	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
	struct qedr_srq *srq = get_qedr_srq(ibsrq);

	xa_erase_irq(&dev->srqs, srq->srq_id);
	in_params.srq_id = srq->srq_id;
	in_params.is_xrc = srq->is_xrc;
	dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);

	if (ibsrq->uobject)
		qedr_free_srq_user_params(srq);
	else
		qedr_free_srq_kernel_params(srq);

	DP_DEBUG(dev, QEDR_MSG_SRQ,
		 "destroy srq: destroyed srq with srq_id=0x%0x\n",
		 srq->srq_id);
	return 0;
}

int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct qed_rdma_modify_srq_in_params in_params = {};
	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
	struct qedr_srq *srq = get_qedr_srq(ibsrq);
	int rc;

	if (attr_mask & IB_SRQ_MAX_WR) {
		DP_ERR(dev,
		       "modify srq: invalid attribute mask=0x%x specified for %p\n",
		       attr_mask, srq);
		return -EINVAL;
	}

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->hw_srq.max_wr) {
			DP_ERR(dev,
			       "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
			       attr->srq_limit, srq->hw_srq.max_wr);
			return -EINVAL;
		}

		in_params.srq_id = srq->srq_id;
		in_params.wqe_limit = attr->srq_limit;
		rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
		if (rc)
			return rc;
	}

	srq->srq_limit = attr->srq_limit;

	DP_DEBUG(dev, QEDR_MSG_SRQ,
		 "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);

	return 0;
}

static enum qed_rdma_qp_type qedr_ib_to_qed_qp_type(enum ib_qp_type ib_qp_type)
{
	switch (ib_qp_type) {
	case IB_QPT_RC:
		return QED_RDMA_QP_TYPE_RC;
	case IB_QPT_XRC_INI:
		return QED_RDMA_QP_TYPE_XRC_INI;
	case IB_QPT_XRC_TGT:
		return QED_RDMA_QP_TYPE_XRC_TGT;
	default:
		return QED_RDMA_QP_TYPE_INVAL;
	}
}

static inline void
qedr_init_common_qp_in_params(struct qedr_dev *dev,
			      struct qedr_pd *pd,
			      struct qedr_qp *qp,
			      struct ib_qp_init_attr *attrs,
			      bool fmr_and_reserved_lkey,
			      struct qed_rdma_create_qp_in_params *params)
{
	/* QP handle to be written in an async event */
	params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
	params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);

	params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
	params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
	params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type);
	params->stats_queue = 0;

	if (pd) {
		params->pd = pd->pd_id;
		params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
	}

	if (qedr_qp_has_sq(qp))
		params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;

	if (qedr_qp_has_rq(qp))
		params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;

	if (qedr_qp_has_srq(qp)) {
		params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
		params->srq_id = qp->srq->srq_id;
		params->use_srq = true;
	} else {
		params->srq_id = 0;
		params->use_srq = false;
	}
}

static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
{
	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
		 "qp=%p. "
		 "sq_addr=0x%llx, "
		 "sq_len=%zd, "
		 "rq_addr=0x%llx, "
		 "rq_len=%zd"
		 "\n",
		 qp,
		 qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0,
		 qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0,
		 qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0,
		 qedr_qp_has_rq(qp) ? qp->urq.buf_len : 0);
}

static void
qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
			    struct qedr_qp *qp,
			    struct qed_rdma_create_qp_out_params *out_params)
{
	qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
	qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;

	qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
			   &qp->usq.pbl_info, FW_PAGE_SHIFT);

	qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
	qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;

	if (!qp->srq)
		qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
				   &qp->urq.pbl_info, FW_PAGE_SHIFT);
}

static void qedr_cleanup_user(struct qedr_dev *dev,
			      struct qedr_ucontext *ctx,
			      struct qedr_qp *qp)
{
	if (qedr_qp_has_sq(qp)) {
		ib_umem_release(qp->usq.umem);
		qp->usq.umem = NULL;
	}

	if (qedr_qp_has_rq(qp)) {
		ib_umem_release(qp->urq.umem);
		qp->urq.umem = NULL;
	}

	if (rdma_protocol_roce(&dev->ibdev, 1)) {
		qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
		qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
	} else {
		kfree(qp->usq.pbl_tbl);
		kfree(qp->urq.pbl_tbl);
	}

	if (qp->usq.db_rec_data) {
		qedr_db_recovery_del(dev, qp->usq.db_addr,
				     &qp->usq.db_rec_data->db_data);
		rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
	}

	if (qp->urq.db_rec_data) {
		qedr_db_recovery_del(dev, qp->urq.db_addr,
				     &qp->urq.db_rec_data->db_data);
		rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
	}

	if (rdma_protocol_iwarp(&dev->ibdev, 1))
		qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
				     &qp->urq.db_rec_db2_data);
}
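
/* User QP creation: pin the library-supplied SQ/RQ buffers, create the
 * QP in firmware, copy the doorbell offsets back to userspace and
 * register the user doorbell-recovery entries.
 */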
static int qedr_create_user_qp(struct qedr_dev *dev,
			       struct qedr_qp *qp,
			       struct ib_pd *ibpd,
			       struct ib_udata *udata,
			       struct ib_qp_init_attr *attrs)
{
	struct qed_rdma_create_qp_in_params in_params;
	struct qed_rdma_create_qp_out_params out_params;
	struct qedr_create_qp_uresp uresp = {};
	struct qedr_create_qp_ureq ureq = {};
	int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
	struct qedr_ucontext *ctx = NULL;
	struct qedr_pd *pd = NULL;
	int rc = 0;

	qp->create_type = QEDR_QP_CREATE_USER;

	if (ibpd) {
		pd = get_qedr_pd(ibpd);
		ctx = pd->uctx;
	}

	if (udata) {
		rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
					udata->inlen));
		if (rc) {
			DP_ERR(dev, "Problem copying data from user space\n");
			return rc;
		}
	}

	if (qedr_qp_has_sq(qp)) {
		/* SQ - read access only (0) */
		rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
					  ureq.sq_len, true, 0, alloc_and_init);
		if (rc)
			return rc;
	}

	if (qedr_qp_has_rq(qp)) {
		/* RQ - read access only (0) */
		rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
					  ureq.rq_len, true, 0, alloc_and_init);
		if (rc)
			return rc;
	}

	memset(&in_params, 0, sizeof(in_params));
	qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
	in_params.qp_handle_lo = ureq.qp_handle_lo;
	in_params.qp_handle_hi = ureq.qp_handle_hi;

	if (qp->qp_type == IB_QPT_XRC_TGT) {
		struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd);

		in_params.xrcd_id = xrcd->xrcd_id;
		in_params.qp_handle_lo = qp->qp_id;
		in_params.use_srq = 1;
	}

	if (qedr_qp_has_sq(qp)) {
		in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
		in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
	}

	if (qedr_qp_has_rq(qp)) {
		in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
		in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
	}

	if (ctx)
		SET_FIELD(in_params.flags, QED_ROCE_EDPM_MODE, ctx->edpm_mode);

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      &in_params, &out_params);

	if (!qp->qed_qp) {
		rc = -ENOMEM;
		goto err1;
	}

	if (rdma_protocol_iwarp(&dev->ibdev, 1))
		qedr_iwarp_populate_user_qp(dev, qp, &out_params);

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;

	if (udata) {
		rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
		if (rc)
			goto err;
	}

	/* db offset was calculated in copy_qp_uresp, now set in the user q */
	if (qedr_qp_has_sq(qp)) {
		qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
		qp->sq.max_wr = attrs->cap.max_send_wr;
		rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
					  &qp->usq.db_rec_data->db_data,
					  DB_REC_WIDTH_32B,
					  DB_REC_USER);
		if (rc)
			goto err;
	}

	if (qedr_qp_has_rq(qp)) {
		qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
		qp->rq.max_wr = attrs->cap.max_recv_wr;
		rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
					  &qp->urq.db_rec_data->db_data,
					  DB_REC_WIDTH_32B,
					  DB_REC_USER);
		if (rc)
			goto err;
	}

	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;

		/* calculate the db_rec_db2 data since it is constant so no
		 * need to reflect from user
		 */
		qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
		qp->urq.db_rec_db2_data.data.value =
			cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);

		rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
					  &qp->urq.db_rec_db2_data,
					  DB_REC_WIDTH_32B,
					  DB_REC_USER);
		if (rc)
			goto err;
	}
	qedr_qp_user_print(dev, qp);
	return rc;

err:
	rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
	if (rc)
		DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);

err1:
	qedr_cleanup_user(dev, ctx, qp);
	return rc;
}

static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
	int rc;

	qp->sq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	qp->sq.db_data.data.icid = qp->icid;

	rc = qedr_db_recovery_add(dev, qp->sq.db,
				  &qp->sq.db_data,
				  DB_REC_WIDTH_32B,
				  DB_REC_KERNEL);
	if (rc)
		return rc;

	qp->rq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
	qp->rq.db_data.data.icid = qp->icid;
	qp->rq.iwarp_db2 = dev->db_addr +
			   DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
	qp->rq.iwarp_db2_data.data.icid = qp->icid;
	qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;

	rc = qedr_db_recovery_add(dev, qp->rq.db,
				  &qp->rq.db_data,
				  DB_REC_WIDTH_32B,
				  DB_REC_KERNEL);
	if (rc)
		return rc;

	rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
				  &qp->rq.iwarp_db2_data,
				  DB_REC_WIDTH_32B,
				  DB_REC_KERNEL);
	return rc;
}

static int
qedr_roce_create_kernel_qp(struct qedr_dev *dev,
			   struct qedr_qp *qp,
			   struct qed_rdma_create_qp_in_params *in_params,
			   u32 n_sq_elems, u32 n_rq_elems)
{
	struct qed_rdma_create_qp_out_params out_params;
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
	};
	int rc;

	params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
	params.num_elems = n_sq_elems;
	params.elem_size = QEDR_SQE_ELEMENT_SIZE;

	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
	if (rc)
		return rc;

	in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
	in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);

	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
	params.num_elems = n_rq_elems;
	params.elem_size = QEDR_RQE_ELEMENT_SIZE;

	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
	if (rc)
		return rc;

	in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
	in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      in_params, &out_params);
	if (!qp->qed_qp)
		return -EINVAL;

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;

	return qedr_set_roce_db_info(dev, qp);
}

static int
qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
			    struct qedr_qp *qp,
			    struct qed_rdma_create_qp_in_params *in_params,
			    u32 n_sq_elems, u32 n_rq_elems)
{
	struct qed_rdma_create_qp_out_params out_params;
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
	};
	int rc;

	in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
						     QEDR_SQE_ELEMENT_SIZE,
						     QED_CHAIN_PAGE_SIZE,
						     QED_CHAIN_MODE_PBL);
	in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
						     QEDR_RQE_ELEMENT_SIZE,
						     QED_CHAIN_PAGE_SIZE,
						     QED_CHAIN_MODE_PBL);

	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
					      in_params, &out_params);
	if (!qp->qed_qp)
		return -EINVAL;

	/* Now we allocate the chain */

	params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
	params.num_elems = n_sq_elems;
	params.elem_size = QEDR_SQE_ELEMENT_SIZE;
	params.ext_pbl_virt = out_params.sq_pbl_virt;
	params.ext_pbl_phys = out_params.sq_pbl_phys;

	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
	if (rc)
		goto err;

	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
	params.num_elems = n_rq_elems;
	params.elem_size = QEDR_RQE_ELEMENT_SIZE;
	params.ext_pbl_virt = out_params.rq_pbl_virt;
	params.ext_pbl_phys = out_params.rq_pbl_phys;

	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
	if (rc)
		goto err;

	qp->qp_id = out_params.qp_id;
	qp->icid = out_params.icid;

	return qedr_set_iwarp_db_info(dev, qp);

err:
	dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);

	return rc;
}

static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
{
	dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
	kfree(qp->wqe_wr_id);

	dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
	kfree(qp->rqe_wr_id);

	/* GSI qp is not registered to db mechanism so no need to delete */
	if (qp->qp_type == IB_QPT_GSI)
		return;

	qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);

	if (!qp->srq) {
		qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);

		if (rdma_protocol_iwarp(&dev->ibdev, 1))
			qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
					     &qp->rq.iwarp_db2_data);
	}
}
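
/* Kernel QP creation: allocate the shadow wr_id arrays and the SQ/RQ
 * qed chains (the ordering of chain allocation vs. firmware QP create
 * differs between RoCE and iWARP), then register the kernel doorbell
 * entries via the per-transport helpers above.
 */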
static int qedr_create_kernel_qp(struct qedr_dev *dev,
				 struct qedr_qp *qp,
				 struct ib_pd *ibpd,
				 struct ib_qp_init_attr *attrs)
{
	struct qed_rdma_create_qp_in_params in_params;
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	int rc = -EINVAL;
	u32 n_rq_elems;
	u32 n_sq_elems;
	u32 n_sq_entries;

	memset(&in_params, 0, sizeof(in_params));
	qp->create_type = QEDR_QP_CREATE_KERNEL;

	/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
	 * the ring. The ring should allow at least a single WR, even if the
	 * user requested none, due to allocation issues.
	 * We should add an extra WR since the prod and cons indices of
	 * wqe_wr_id are managed in such a way that the WQ is considered full
	 * when (prod+1)%max_wr==cons. We currently don't do that because we
	 * double the number of entries due to an iSER issue that pushes far
	 * more WRs than indicated. If we decline its ib_post_send() then we
	 * get error prints in the dmesg we'd like to avoid.
	 */
	qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
			      dev->attr.max_sqe);

	qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
				GFP_KERNEL);
	if (!qp->wqe_wr_id) {
		DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
		return -ENOMEM;
	}

	/* QP handle to be written in CQE */
	in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
	in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);

	/* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
	 * the ring. The ring should allow at least a single WR, even if the
	 * user requested none, due to allocation issues.
	 */
	qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);

	/* Allocate driver internal RQ array */
	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
				GFP_KERNEL);
	if (!qp->rqe_wr_id) {
		DP_ERR(dev,
		       "create qp: failed RQ shadow memory allocation\n");
		kfree(qp->wqe_wr_id);
		return -ENOMEM;
	}

	qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);

	n_sq_entries = attrs->cap.max_send_wr;
	n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
	n_sq_entries = max_t(u32, n_sq_entries, 1);
	n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;

	n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;

	if (rdma_protocol_iwarp(&dev->ibdev, 1))
		rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
						 n_sq_elems, n_rq_elems);
	else
		rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
						n_sq_elems, n_rq_elems);
	if (rc)
		qedr_cleanup_kernel(dev, qp);

	return rc;
}

static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
				  struct ib_udata *udata)
{
	struct qedr_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct qedr_ucontext,
					  ibucontext);
	int rc;

	if (qp->qp_type != IB_QPT_GSI) {
		rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
		if (rc)
			return rc;
	}

	if (qp->create_type == QEDR_QP_CREATE_USER)
		qedr_cleanup_user(dev, ctx, qp);
	else
		qedr_cleanup_kernel(dev, qp);

	return 0;
}

int qedr_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
		   struct ib_udata *udata)
{
	struct qedr_xrcd *xrcd = NULL;
	struct ib_pd *ibpd = ibqp->pd;
	struct qedr_pd *pd = get_qedr_pd(ibpd);
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	int rc = 0;

	if (attrs->create_flags)
		return -EOPNOTSUPP;

	if (attrs->qp_type == IB_QPT_XRC_TGT)
		xrcd = get_qedr_xrcd(attrs->xrcd);
	else
		pd = get_qedr_pd(ibpd);

	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
		 udata ? "user library" : "kernel", pd);

	rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
	if (rc)
		return rc;

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
		 udata ? "user library" : "kernel", attrs->event_handler, pd,
		 get_qedr_cq(attrs->send_cq),
		 get_qedr_cq(attrs->send_cq)->icid,
		 get_qedr_cq(attrs->recv_cq),
		 attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);

	qedr_set_common_qp_params(dev, qp, pd, attrs);

	if (attrs->qp_type == IB_QPT_GSI)
		return qedr_create_gsi_qp(dev, attrs, qp);

	if (udata || xrcd)
		rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
	else
		rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);

	if (rc)
		return rc;

	qp->ibqp.qp_num = qp->qp_id;

	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
		rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
		if (rc)
			goto out_free_qp_resources;
	}

	return 0;

out_free_qp_resources:
	qedr_free_qp_resources(dev, qp, udata);
	return -EFAULT;
}

static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
{
	switch (qp_state) {
	case QED_ROCE_QP_STATE_RESET:
		return IB_QPS_RESET;
	case QED_ROCE_QP_STATE_INIT:
		return IB_QPS_INIT;
	case QED_ROCE_QP_STATE_RTR:
		return IB_QPS_RTR;
	case QED_ROCE_QP_STATE_RTS:
		return IB_QPS_RTS;
	case QED_ROCE_QP_STATE_SQD:
		return IB_QPS_SQD;
	case QED_ROCE_QP_STATE_ERR:
		return IB_QPS_ERR;
	case QED_ROCE_QP_STATE_SQE:
		return IB_QPS_SQE;
	}
	return IB_QPS_ERR;
}

static enum qed_roce_qp_state qedr_get_state_from_ibqp(
					enum ib_qp_state qp_state)
{
	switch (qp_state) {
	case IB_QPS_RESET:
		return QED_ROCE_QP_STATE_RESET;
	case IB_QPS_INIT:
		return QED_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return QED_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return QED_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return QED_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return QED_ROCE_QP_STATE_ERR;
	default:
		return QED_ROCE_QP_STATE_ERR;
	}
}
2353 static int qedr_update_qp_state(struct qedr_dev *dev,
2355 enum qed_roce_qp_state cur_state,
2356 enum qed_roce_qp_state new_state)
2360 if (new_state == cur_state)
2363 switch (cur_state) {
2364 case QED_ROCE_QP_STATE_RESET:
2365 switch (new_state) {
2366 case QED_ROCE_QP_STATE_INIT:
2373 case QED_ROCE_QP_STATE_INIT:
2374 switch (new_state) {
2375 case QED_ROCE_QP_STATE_RTR:
2376 /* Update doorbell (in case post_recv was
2377 * done before move to RTR)
2380 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2381 writel(qp->rq.db_data.raw, qp->rq.db);
2384 case QED_ROCE_QP_STATE_ERR:
2387 /* Invalid state change. */
2392 case QED_ROCE_QP_STATE_RTR:
2394 switch (new_state) {
2395 case QED_ROCE_QP_STATE_RTS:
2397 case QED_ROCE_QP_STATE_ERR:
2400 /* Invalid state change. */
2405 case QED_ROCE_QP_STATE_RTS:
2407 switch (new_state) {
2408 case QED_ROCE_QP_STATE_SQD:
2410 case QED_ROCE_QP_STATE_ERR:
2413 /* Invalid state change. */
2418 case QED_ROCE_QP_STATE_SQD:
2420 switch (new_state) {
2421 case QED_ROCE_QP_STATE_RTS:
2422 case QED_ROCE_QP_STATE_ERR:
2425 /* Invalid state change. */
2430 case QED_ROCE_QP_STATE_ERR:
2432 switch (new_state) {
2433 case QED_ROCE_QP_STATE_RESET:
2434 if ((qp->rq.prod != qp->rq.cons) ||
2435 (qp->sq.prod != qp->sq.cons)) {
2437 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
2438 qp->rq.prod, qp->rq.cons, qp->sq.prod,
2456 int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2457 int attr_mask, struct ib_udata *udata)
2459 struct qedr_qp *qp = get_qedr_qp(ibqp);
2460 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
2461 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
2462 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2463 enum ib_qp_state old_qp_state, new_qp_state;
2464 enum qed_roce_qp_state cur_state;
2467 DP_DEBUG(dev, QEDR_MSG_QP,
2468 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
2471 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
2474 old_qp_state = qedr_get_ibqp_state(qp->state);
2475 if (attr_mask & IB_QP_STATE)
2476 new_qp_state = attr->qp_state;
2478 new_qp_state = old_qp_state;
2480 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2481 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
2482 ibqp->qp_type, attr_mask)) {
2484 "modify qp: invalid attribute mask=0x%x specified for\n"
2485 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
2486 attr_mask, qp->qp_id, ibqp->qp_type,
2487 old_qp_state, new_qp_state);
2493 /* Translate the masks... */
2494 if (attr_mask & IB_QP_STATE) {
2495 SET_FIELD(qp_params.modify_flags,
2496 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
2497 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
2500 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
2501 qp_params.sqd_async = true;
2503 if (attr_mask & IB_QP_PKEY_INDEX) {
2504 SET_FIELD(qp_params.modify_flags,
2505 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
2506 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
2511 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
2514 if (attr_mask & IB_QP_QKEY)
2515 qp->qkey = attr->qkey;
2517 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2518 SET_FIELD(qp_params.modify_flags,
2519 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
2520 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2521 IB_ACCESS_REMOTE_READ;
2522 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2523 IB_ACCESS_REMOTE_WRITE;
2524 qp_params.incoming_atomic_en = attr->qp_access_flags &
2525 IB_ACCESS_REMOTE_ATOMIC;
2528 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
2529 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2532 if (attr_mask & IB_QP_PATH_MTU) {
2533 if (attr->path_mtu < IB_MTU_256 ||
2534 attr->path_mtu > IB_MTU_4096) {
2535 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
2539 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
2540 ib_mtu_enum_to_int(iboe_get_mtu
2546 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2547 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2550 SET_FIELD(qp_params.modify_flags,
2551 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
2553 qp_params.traffic_class_tos = grh->traffic_class;
2554 qp_params.flow_label = grh->flow_label;
2555 qp_params.hop_limit_ttl = grh->hop_limit;
2557 qp->sgid_idx = grh->sgid_index;
2559 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
2562 "modify qp: problems with GID index %d (rc=%d)\n",
2563 grh->sgid_index, rc);
2567 rc = qedr_get_dmac(dev, &attr->ah_attr,
2568 qp_params.remote_mac_addr);
2572 qp_params.use_local_mac = true;
2573 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2575 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2576 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2577 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2578 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2579 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2580 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2581 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2582 qp_params.remote_mac_addr);
2584 qp_params.mtu = qp->mtu;
2585 qp_params.lb_indication = false;
2588 if (!qp_params.mtu) {
2589 /* Stay with current MTU */
2591 qp_params.mtu = qp->mtu;
2594 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2597 if (attr_mask & IB_QP_TIMEOUT) {
2598 SET_FIELD(qp_params.modify_flags,
2599 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2601 /* The received timeout value is an exponent used like this:
2602 * "12.7.34 LOCAL ACK TIMEOUT
2603 * Value representing the transport (ACK) timeout for use by
2604 * the remote, expressed as: 4.096 * 2^timeout [usec]"
2605 * The FW expects timeout in msec so we need to divide the usec
2606 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
2607 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2608 * A value of zero means infinite, so we use a 'max_t' to make
2609 * sure that sub-1 msec values will be configured as 1 msec.
2612 qp_params.ack_timeout =
2613 1 << max_t(int, attr->timeout - 8, 0);
2615 qp_params.ack_timeout = 0;
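	/* Worked example (values chosen for illustration): attr->timeout = 14
	 * means 4.096us * 2^14 ~= 67 msec on the wire; the approximation
	 * 1 << (14 - 8) = 64 msec is what gets configured. Any timeout <= 8
	 * clamps to 1 << 0 = 1 msec.
	 */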
2618 if (attr_mask & IB_QP_RETRY_CNT) {
2619 SET_FIELD(qp_params.modify_flags,
2620 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2621 qp_params.retry_cnt = attr->retry_cnt;
2624 if (attr_mask & IB_QP_RNR_RETRY) {
2625 SET_FIELD(qp_params.modify_flags,
2626 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2627 qp_params.rnr_retry_cnt = attr->rnr_retry;
2630 if (attr_mask & IB_QP_RQ_PSN) {
2631 SET_FIELD(qp_params.modify_flags,
2632 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2633 qp_params.rq_psn = attr->rq_psn;
2634 qp->rq_psn = attr->rq_psn;
2637 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2638 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2641 "unsupported max_rd_atomic=%d, supported=%d\n",
2642 attr->max_rd_atomic,
2643 dev->attr.max_qp_req_rd_atomic_resc);
2647 SET_FIELD(qp_params.modify_flags,
2648 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2649 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2652 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2653 SET_FIELD(qp_params.modify_flags,
2654 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2655 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2658 if (attr_mask & IB_QP_SQ_PSN) {
2659 SET_FIELD(qp_params.modify_flags,
2660 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2661 qp_params.sq_psn = attr->sq_psn;
2662 qp->sq_psn = attr->sq_psn;
2665 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2666 if (attr->max_dest_rd_atomic >
2667 dev->attr.max_qp_resp_rd_atomic_resc) {
2669 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2670 attr->max_dest_rd_atomic,
2671 dev->attr.max_qp_resp_rd_atomic_resc);
2677 SET_FIELD(qp_params.modify_flags,
2678 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2679 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2682 if (attr_mask & IB_QP_DEST_QPN) {
2683 SET_FIELD(qp_params.modify_flags,
2684 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2686 qp_params.dest_qp = attr->dest_qp_num;
2687 qp->dest_qp_num = attr->dest_qp_num;
2690 cur_state = qp->state;
2692 /* Update the QP state before the actual ramrod to prevent a race with
2693 * fast path. Modifying the QP state to error will cause the device to
2694 * flush the CQEs, and while polling, the flushed CQEs would be
2695 * considered a potential issue if the QP were not in the error state.
2697 if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
2698 !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
2699 qp->state = QED_ROCE_QP_STATE_ERR;
2701 if (qp->qp_type != IB_QPT_GSI)
2702 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2703 qp->qed_qp, &qp_params);
2705 if (attr_mask & IB_QP_STATE) {
2706 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
2707 rc = qedr_update_qp_state(dev, qp, cur_state,
2708 qp_params.new_state);
2709 qp->state = qp_params.new_state;
2716 static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2718 int ib_qp_acc_flags = 0;
2720 if (params->incoming_rdma_write_en)
2721 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2722 if (params->incoming_rdma_read_en)
2723 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2724 if (params->incoming_atomic_en)
2725 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2726 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2727 return ib_qp_acc_flags;
2730 int qedr_query_qp(struct ib_qp *ibqp,
2731 struct ib_qp_attr *qp_attr,
2732 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2734 struct qed_rdma_query_qp_out_params params;
2735 struct qedr_qp *qp = get_qedr_qp(ibqp);
2736 struct qedr_dev *dev = qp->dev;
2739 memset(&params, 0, sizeof(params));
2740 memset(qp_attr, 0, sizeof(*qp_attr));
2741 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2743 if (qp->qp_type != IB_QPT_GSI) {
2744 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2747 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2749 qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS);
2752 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2753 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2754 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2755 qp_attr->rq_psn = params.rq_psn;
2756 qp_attr->sq_psn = params.sq_psn;
2757 qp_attr->dest_qp_num = params.dest_qp;
2759 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2761 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2762 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2763 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2764 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2765 qp_attr->cap.max_inline_data = dev->attr.max_inline;
2766 qp_init_attr->cap = qp_attr->cap;
2768 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2769 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2770 params.flow_label, qp->sgid_idx,
2771 params.hop_limit_ttl, params.traffic_class_tos);
2772 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2773 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2774 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
2775 qp_attr->timeout = params.timeout;
2776 qp_attr->rnr_retry = params.rnr_retry;
2777 qp_attr->retry_cnt = params.retry_cnt;
2778 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2779 qp_attr->pkey_index = params.pkey_index;
2780 qp_attr->port_num = 1;
2781 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2782 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
2783 qp_attr->alt_pkey_index = 0;
2784 qp_attr->alt_port_num = 0;
2785 qp_attr->alt_timeout = 0;
2786 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2788 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2789 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2790 qp_attr->max_rd_atomic = params.max_rd_atomic;
2791 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2793 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2794 qp_attr->cap.max_inline_data);
2800 int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
2802 struct qedr_qp *qp = get_qedr_qp(ibqp);
2803 struct qedr_dev *dev = qp->dev;
2804 struct ib_qp_attr attr;
2807 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2810 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2811 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2812 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2813 (qp->state != QED_ROCE_QP_STATE_INIT)) {
2815 attr.qp_state = IB_QPS_ERR;
2816 attr_mask |= IB_QP_STATE;
2818 /* Change the QP state to ERROR */
2819 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2822 /* If connection establishment started, the WAIT_FOR_CONNECT
2823 * bit will be on and we need to wait for the establishment
2824 * to complete before destroying the qp.
2826 if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
2827 &qp->iwarp_cm_flags))
2828 wait_for_completion(&qp->iwarp_cm_comp);
2830 /* If graceful disconnect started, the WAIT_FOR_DISCONNECT
2831 * bit will be on, and we need to wait for the disconnect to
2832 * complete before continuing. We can use the same completion,
2833 * iwarp_cm_comp, since this is the only place that waits for
2834 * this completion and it is sequential. In addition,
2835 * disconnect can't occur before the connection is fully
2836 * established, therefore if WAIT_FOR_DISCONNECT is on it
2837 * means WAIT_FOR_CONNECT is also on and the completion for
2838 * CONNECT already occurred.
2840 if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
2841 &qp->iwarp_cm_flags))
2842 wait_for_completion(&qp->iwarp_cm_comp);
2845 if (qp->qp_type == IB_QPT_GSI)
2846 qedr_destroy_gsi_qp(dev);
2848 /* We need to remove the entry from the xarray before we release the
2849 * qp_id to avoid a race of the qp_id being reallocated and failing
2850 * on xa_insert.
2852 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2853 xa_erase(&dev->qps, qp->qp_id);
2855 qedr_free_qp_resources(dev, qp, udata);
2857 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
2858 qedr_iw_qp_rem_ref(&qp->ibqp);
2859 wait_for_completion(&qp->qp_rel_comp);
2865 int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
2866 struct ib_udata *udata)
2868 struct qedr_ah *ah = get_qedr_ah(ibah);
2870 rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
2875 int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
2877 struct qedr_ah *ah = get_qedr_ah(ibah);
2879 rdma_destroy_ah_attr(&ah->attr);
2883 static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2885 struct qedr_pbl *pbl, *tmp;
2887 if (info->pbl_table)
2888 list_add_tail(&info->pbl_table->list_entry,
2889 &info->free_pbl_list);
2891 if (!list_empty(&info->inuse_pbl_list))
2892 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2894 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2895 list_del(&pbl->list_entry);
2896 qedr_free_pbl(dev, &info->pbl_info, pbl);
2900 static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2901 size_t page_list_len, bool two_layered)
2903 struct qedr_pbl *tmp;
2906 INIT_LIST_HEAD(&info->free_pbl_list);
2907 INIT_LIST_HEAD(&info->inuse_pbl_list);
2909 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2910 page_list_len, two_layered);
2914 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2915 if (IS_ERR(info->pbl_table)) {
2916 rc = PTR_ERR(info->pbl_table);
2920 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2921 &info->pbl_table->pa);
2923 /* In the usual case we use 2 PBLs, so we add one to the free
2924 * list and allocate another one
2926 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2928 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2932 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2934 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2938 free_mr_info(dev, info);
2943 struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2944 u64 usr_addr, int acc, struct ib_udata *udata)
2946 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2951 pd = get_qedr_pd(ibpd);
2952 DP_DEBUG(dev, QEDR_MSG_MR,
2953 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2954 pd->pd_id, start, len, usr_addr, acc);
2956 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2957 return ERR_PTR(-EINVAL);
2959 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2963 mr->type = QEDR_MR_USER;
2965 mr->umem = ib_umem_get(ibpd->device, start, len, acc);
2966 if (IS_ERR(mr->umem)) {
2971 rc = init_mr_info(dev, &mr->info,
2972 ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);
2976 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2977 &mr->info.pbl_info, PAGE_SHIFT);
2979 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2982 DP_ERR(dev, "Out of MR resources\n");
2984 DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
2989 /* Index only, 18 bit long, lkey = itid << 8 | key */
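	/* e.g. (illustrative values only) itid = 0x2A and key = 0x05 compose
	 * to lkey = (0x2A << 8) | 0x05 = 0x2A05; the itid occupies the upper
	 * bits and the 8-bit key the lower byte.
	 */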
2990 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2992 mr->hw_mr.pd = pd->pd_id;
2993 mr->hw_mr.local_read = 1;
2994 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2995 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2996 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2997 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2998 mr->hw_mr.mw_bind = false;
2999 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
3000 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3001 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3002 mr->hw_mr.page_size_log = PAGE_SHIFT;
3003 mr->hw_mr.length = len;
3004 mr->hw_mr.vaddr = usr_addr;
3005 mr->hw_mr.phy_mr = false;
3006 mr->hw_mr.dma_mr = false;
3008 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3010 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3014 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3015 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3016 mr->hw_mr.remote_atomic)
3017 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3019 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
3024 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3026 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
3032 int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3034 struct qedr_mr *mr = get_qedr_mr(ib_mr);
3035 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
3038 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
3042 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3044 if (mr->type != QEDR_MR_DMA)
3045 free_mr_info(dev, &mr->info);
3047 /* it could be user registered memory. */
3048 ib_umem_release(mr->umem);
3055 static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
3056 int max_page_list_len)
3058 struct qedr_pd *pd = get_qedr_pd(ibpd);
3059 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3063 DP_DEBUG(dev, QEDR_MSG_MR,
3064 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
3067 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3072 mr->type = QEDR_MR_FRMR;
3074 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
3078 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3081 DP_ERR(dev, "Out of MR resources\n");
3083 DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3088 /* Index only, 18 bit long, lkey = itid << 8 | key */
3089 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
3091 mr->hw_mr.pd = pd->pd_id;
3092 mr->hw_mr.local_read = 1;
3093 mr->hw_mr.local_write = 0;
3094 mr->hw_mr.remote_read = 0;
3095 mr->hw_mr.remote_write = 0;
3096 mr->hw_mr.remote_atomic = 0;
3097 mr->hw_mr.mw_bind = false;
3098 mr->hw_mr.pbl_ptr = 0;
3099 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3100 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3101 mr->hw_mr.length = 0;
3102 mr->hw_mr.vaddr = 0;
3103 mr->hw_mr.phy_mr = true;
3104 mr->hw_mr.dma_mr = false;
3106 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3108 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3112 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3113 mr->ibmr.rkey = mr->ibmr.lkey;
3115 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
3119 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3125 struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
3130 if (mr_type != IB_MR_TYPE_MEM_REG)
3131 return ERR_PTR(-EINVAL);
3133 mr = __qedr_alloc_mr(ibpd, max_num_sg);
3136 return ERR_PTR(-EINVAL);
3141 static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
3143 struct qedr_mr *mr = get_qedr_mr(ibmr);
3144 struct qedr_pbl *pbl_table;
3145 struct regpair *pbe;
3148 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
3149 DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
3153 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
3156 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
3157 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
3158 pbe = (struct regpair *)pbl_table->va;
3159 pbe += mr->npages % pbes_in_page;
3160 pbe->lo = cpu_to_le32((u32)addr);
3161 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
3168 static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
3170 int work = info->completed - info->completed_handled - 1;
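	/* Note: the '-1' deliberately holds back the most recently completed
	 * FMR. Only invalidations posted before a completed FMR are known to
	 * have finished, so the latest completion's PBL is not reclaimed yet.
	 */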
3172 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
3173 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
3174 struct qedr_pbl *pbl;
3176 /* Free all the page lists that can be freed (all the ones
3177 * that were invalidated), under the assumption that if an FMR
3178 * completed successfully, then any invalidate operation posted
3179 * before it has also completed.
3181 pbl = list_first_entry(&info->inuse_pbl_list,
3182 struct qedr_pbl, list_entry);
3183 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
3184 info->completed_handled++;
3188 int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
3189 int sg_nents, unsigned int *sg_offset)
3191 struct qedr_mr *mr = get_qedr_mr(ibmr);
3195 handle_completed_mrs(mr->dev, &mr->info);
3196 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
3199 struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
3201 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3202 struct qedr_pd *pd = get_qedr_pd(ibpd);
3206 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3208 return ERR_PTR(-ENOMEM);
3210 mr->type = QEDR_MR_DMA;
3212 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3215 DP_ERR(dev, "Out of MR resources\n");
3217 DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3222 /* index only, 18 bit long, lkey = itid << 8 | key */
3223 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3224 mr->hw_mr.pd = pd->pd_id;
3225 mr->hw_mr.local_read = 1;
3226 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3227 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3228 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3229 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3230 mr->hw_mr.dma_mr = true;
3232 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3234 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3238 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3239 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3240 mr->hw_mr.remote_atomic)
3241 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3243 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
3247 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3253 static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
3255 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
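/* A minimal worked example of the check above: with max_wr = 4, prod = 3
 * and cons = 0, ((3 + 1) % 4) == 0 so the WQ reports full. One slot is
 * left unused so that a full ring can be distinguished from an empty one
 * (prod == cons).
 */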
3258 static int sge_data_len(struct ib_sge *sg_list, int num_sge)
3262 for (i = 0; i < num_sge; i++)
3263 len += sg_list[i].length;
3268 static void swap_wqe_data64(u64 *p)
3272 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
3273 *p = cpu_to_be64(cpu_to_le64(*p));
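	/* Exactly one of the two conversions above is a no-op on any host,
	 * so the net effect is an unconditional byte swap of each 64-bit
	 * word: on little-endian CPUs cpu_to_le64() is the identity and
	 * cpu_to_be64() swaps; on big-endian CPUs the roles are reversed.
	 */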
3276 static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
3277 struct qedr_qp *qp, u8 *wqe_size,
3278 const struct ib_send_wr *wr,
3279 const struct ib_send_wr **bad_wr,
3282 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
3283 char *seg_prt, *wqe;
3286 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
3287 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
3301 /* Copy data inline */
3302 for (i = 0; i < wr->num_sge; i++) {
3303 u32 len = wr->sg_list[i].length;
3304 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
3309 /* New segment required */
3311 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
3313 seg_siz = sizeof(struct rdma_sq_common_wqe);
3317 /* Calculate currently allowed length */
3318 cur = min_t(u32, len, seg_siz);
3319 memcpy(seg_prt, src, cur);
3321 /* Update segment variables */
3325 /* Update sge variables */
3329 /* Swap fully-completed segments */
3331 swap_wqe_data64((u64 *)wqe);
3335 /* swap last not completed segment */
3337 swap_wqe_data64((u64 *)wqe);
3342 #define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
3344 DMA_REGPAIR_LE(sge->addr, vaddr); \
3345 (sge)->length = cpu_to_le32(vlength); \
3346 (sge)->flags = cpu_to_le32(vflags); \
3349 #define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
3351 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
3352 (hdr)->num_sges = num_sge; \
3355 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
3357 DMA_REGPAIR_LE(sge->addr, vaddr); \
3358 (sge)->length = cpu_to_le32(vlength); \
3359 (sge)->l_key = cpu_to_le32(vlkey); \
3362 static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
3363 const struct ib_send_wr *wr)
3368 for (i = 0; i < wr->num_sge; i++) {
3369 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
3371 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
3372 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
3373 sge->length = cpu_to_le32(wr->sg_list[i].length);
3374 data_size += wr->sg_list[i].length;
3378 *wqe_size += wr->num_sge;
3383 static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
3385 struct rdma_sq_rdma_wqe_1st *rwqe,
3386 struct rdma_sq_rdma_wqe_2nd *rwqe2,
3387 const struct ib_send_wr *wr,
3388 const struct ib_send_wr **bad_wr)
3390 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
3391 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
3393 if (wr->send_flags & IB_SEND_INLINE &&
3394 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
3395 wr->opcode == IB_WR_RDMA_WRITE)) {
3398 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
3399 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
3400 bad_wr, &rwqe->flags, flags);
3403 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
3406 static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
3408 struct rdma_sq_send_wqe_1st *swqe,
3409 struct rdma_sq_send_wqe_2st *swqe2,
3410 const struct ib_send_wr *wr,
3411 const struct ib_send_wr **bad_wr)
3413 memset(swqe2, 0, sizeof(*swqe2));
3414 if (wr->send_flags & IB_SEND_INLINE) {
3417 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
3418 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
3419 bad_wr, &swqe->flags, flags);
3422 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
3425 static int qedr_prepare_reg(struct qedr_qp *qp,
3426 struct rdma_sq_fmr_wqe_1st *fwqe1,
3427 const struct ib_reg_wr *wr)
3429 struct qedr_mr *mr = get_qedr_mr(wr->mr);
3430 struct rdma_sq_fmr_wqe_2nd *fwqe2;
3432 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
3433 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
3434 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
3435 fwqe1->l_key = wr->key;
3437 fwqe2->access_ctrl = 0;
3439 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
3440 !!(wr->access & IB_ACCESS_REMOTE_READ));
3441 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
3442 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
3443 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
3444 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
3445 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
3446 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
3447 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
3448 fwqe2->fmr_ctrl = 0;
3450 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
3451 ilog2(mr->ibmr.page_size) - 12);
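	/* The '- 12' encodes the page size as a power of two relative to
	 * 4 KiB: e.g. a 4 KiB page maps to 0 and a 2 MiB page to 9
	 * (illustrative arithmetic, ilog2(2M) - 12 = 21 - 12).
	 */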
3453 fwqe2->length_hi = 0;
3454 fwqe2->length_lo = mr->ibmr.length;
3455 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
3456 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
3458 qp->wqe_wr_id[qp->sq.prod].mr = mr;
3463 static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
3466 case IB_WR_RDMA_WRITE:
3467 case IB_WR_RDMA_WRITE_WITH_IMM:
3468 return IB_WC_RDMA_WRITE;
3469 case IB_WR_SEND_WITH_IMM:
3471 case IB_WR_SEND_WITH_INV:
3473 case IB_WR_RDMA_READ:
3474 case IB_WR_RDMA_READ_WITH_INV:
3475 return IB_WC_RDMA_READ;
3476 case IB_WR_ATOMIC_CMP_AND_SWP:
3477 return IB_WC_COMP_SWAP;
3478 case IB_WR_ATOMIC_FETCH_AND_ADD:
3479 return IB_WC_FETCH_ADD;
3481 return IB_WC_REG_MR;
3482 case IB_WR_LOCAL_INV:
3483 return IB_WC_LOCAL_INV;
3489 static inline bool qedr_can_post_send(struct qedr_qp *qp,
3490 const struct ib_send_wr *wr)
3492 int wq_is_full, err_wr, pbl_is_full;
3493 struct qedr_dev *dev = qp->dev;
3495 /* prevent SQ overflow and/or processing of a bad WR */
3496 err_wr = wr->num_sge > qp->sq.max_sges;
3497 wq_is_full = qedr_wq_is_full(&qp->sq);
3498 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
3499 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
3500 if (wq_is_full || err_wr || pbl_is_full) {
3501 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
3503 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
3505 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
3508 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
3510 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
3512 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
3516 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
3518 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
3520 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
3527 static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3528 const struct ib_send_wr **bad_wr)
3530 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3531 struct qedr_qp *qp = get_qedr_qp(ibqp);
3532 struct rdma_sq_atomic_wqe_1st *awqe1;
3533 struct rdma_sq_atomic_wqe_2nd *awqe2;
3534 struct rdma_sq_atomic_wqe_3rd *awqe3;
3535 struct rdma_sq_send_wqe_2st *swqe2;
3536 struct rdma_sq_local_inv_wqe *iwqe;
3537 struct rdma_sq_rdma_wqe_2nd *rwqe2;
3538 struct rdma_sq_send_wqe_1st *swqe;
3539 struct rdma_sq_rdma_wqe_1st *rwqe;
3540 struct rdma_sq_fmr_wqe_1st *fwqe1;
3541 struct rdma_sq_common_wqe *wqe;
3546 if (!qedr_can_post_send(qp, wr)) {
3551 wqe = qed_chain_produce(&qp->sq.pbl);
3552 qp->wqe_wr_id[qp->sq.prod].signaled =
3553 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3556 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3557 !!(wr->send_flags & IB_SEND_SOLICITED));
3558 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3559 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3560 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3561 !!(wr->send_flags & IB_SEND_FENCE));
3562 wqe->prev_wqe_size = qp->prev_wqe_size;
3564 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
3566 switch (wr->opcode) {
3567 case IB_WR_SEND_WITH_IMM:
3568 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3573 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3574 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3576 swqe2 = qed_chain_produce(&qp->sq.pbl);
3578 swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
3579 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3581 swqe->length = cpu_to_le32(length);
3582 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3583 qp->prev_wqe_size = swqe->wqe_size;
3584 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3587 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3588 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3591 swqe2 = qed_chain_produce(&qp->sq.pbl);
3592 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3594 swqe->length = cpu_to_le32(length);
3595 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3596 qp->prev_wqe_size = swqe->wqe_size;
3597 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3599 case IB_WR_SEND_WITH_INV:
3600 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3601 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3602 swqe2 = qed_chain_produce(&qp->sq.pbl);
3604 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3605 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3607 swqe->length = cpu_to_le32(length);
3608 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3609 qp->prev_wqe_size = swqe->wqe_size;
3610 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3613 case IB_WR_RDMA_WRITE_WITH_IMM:
3614 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3619 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3620 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3623 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3624 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3625 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3627 rwqe->length = cpu_to_le32(length);
3628 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3629 qp->prev_wqe_size = rwqe->wqe_size;
3630 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3632 case IB_WR_RDMA_WRITE:
3633 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3634 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3637 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3638 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3640 rwqe->length = cpu_to_le32(length);
3641 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3642 qp->prev_wqe_size = rwqe->wqe_size;
3643 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3645 case IB_WR_RDMA_READ_WITH_INV:
3646 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
3647 fallthrough; /* handled identically to RDMA READ */
3649 case IB_WR_RDMA_READ:
3650 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3651 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3654 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3655 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3657 rwqe->length = cpu_to_le32(length);
3658 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3659 qp->prev_wqe_size = rwqe->wqe_size;
3660 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3663 case IB_WR_ATOMIC_CMP_AND_SWP:
3664 case IB_WR_ATOMIC_FETCH_AND_ADD:
3665 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3666 awqe1->wqe_size = 4;
3668 awqe2 = qed_chain_produce(&qp->sq.pbl);
3669 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3670 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3672 awqe3 = qed_chain_produce(&qp->sq.pbl);
3674 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3675 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3676 DMA_REGPAIR_LE(awqe3->swap_data,
3677 atomic_wr(wr)->compare_add);
3679 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3680 DMA_REGPAIR_LE(awqe3->swap_data,
3681 atomic_wr(wr)->swap);
3682 DMA_REGPAIR_LE(awqe3->cmp_data,
3683 atomic_wr(wr)->compare_add);
3686 qedr_prepare_sq_sges(qp, NULL, wr);
3688 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3689 qp->prev_wqe_size = awqe1->wqe_size;
3692 case IB_WR_LOCAL_INV:
3693 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3696 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3697 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3698 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3699 qp->prev_wqe_size = iwqe->wqe_size;
3702 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3703 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3704 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3705 fwqe1->wqe_size = 2;
3707 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3709 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3714 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3715 qp->prev_wqe_size = fwqe1->wqe_size;
3718 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3727 /* Restore prod to its position before
3728 * this WR was processed
3730 value = le16_to_cpu(qp->sq.db_data.data.value);
3731 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3733 /* Restore prev_wqe_size */
3734 qp->prev_wqe_size = wqe->prev_wqe_size;
3736 DP_ERR(dev, "POST SEND FAILED\n");
3742 int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3743 const struct ib_send_wr **bad_wr)
3745 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3746 struct qedr_qp *qp = get_qedr_qp(ibqp);
3747 unsigned long flags;
3752 if (qp->qp_type == IB_QPT_GSI)
3753 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3755 spin_lock_irqsave(&qp->q_lock, flags);
3757 if (rdma_protocol_roce(&dev->ibdev, 1)) {
3758 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3759 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3760 (qp->state != QED_ROCE_QP_STATE_SQD)) {
3761 spin_unlock_irqrestore(&qp->q_lock, flags);
3763 DP_DEBUG(dev, QEDR_MSG_CQ,
3764 "QP in wrong state! QP icid=0x%x state %d\n",
3765 qp->icid, qp->state);
3771 rc = __qedr_post_send(ibqp, wr, bad_wr);
3775 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3777 qedr_inc_sw_prod(&qp->sq);
3779 qp->sq.db_data.data.value++;
3785 * If there was a failure in the first WR then it will be triggered in
3786 * vain. However, this is not harmful (as long as the producer value is
3787 * unchanged). For performance reasons we avoid checking for this
3788 * redundant doorbell.
3790 * qp->wqe_wr_id is accessed during qedr_poll_cq, as
3791 * soon as we give the doorbell, we could get a completion
3792 * for this wr, therefore we need to make sure that the
3793 * memory is updated before giving the doorbell.
3794 * During qedr_poll_cq, rmb is called before accessing the
3795 * cqe. This covers for the smp_rmb as well.
3798 writel(qp->sq.db_data.raw, qp->sq.db);
3800 spin_unlock_irqrestore(&qp->q_lock, flags);
3805 static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
3809 /* Calculate the number of elements used, based on the producer
3810 * and consumer counts, and subtract it from the maximum number of
3811 * work requests supported to get the elements left.
3813 used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
3815 return hw_srq->max_wr - used;
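/* e.g. with wr_prod_cnt = 10, wr_cons_cnt = 7 and max_wr = 32 this yields
 * used = 3 and 29 elements left; the unsigned subtraction also gives the
 * right answer after the 32-bit counters wrap.
 */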
3818 int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
3819 const struct ib_recv_wr **bad_wr)
3821 struct qedr_srq *srq = get_qedr_srq(ibsrq);
3822 struct qedr_srq_hwq_info *hw_srq;
3823 struct qedr_dev *dev = srq->dev;
3824 struct qed_chain *pbl;
3825 unsigned long flags;
3829 spin_lock_irqsave(&srq->lock, flags);
3831 hw_srq = &srq->hw_srq;
3832 pbl = &srq->hw_srq.pbl;
3834 struct rdma_srq_wqe_header *hdr;
3837 if (!qedr_srq_elem_left(hw_srq) ||
3838 wr->num_sge > srq->hw_srq.max_sges) {
3839 DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n",
3840 hw_srq->wr_prod_cnt,
3841 atomic_read(&hw_srq->wr_cons_cnt),
3842 wr->num_sge, srq->hw_srq.max_sges);
3848 hdr = qed_chain_produce(pbl);
3849 num_sge = wr->num_sge;
3850 /* Set the number of SGEs and the work request id in the header */
3851 SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
3853 srq->hw_srq.wr_prod_cnt++;
3857 DP_DEBUG(dev, QEDR_MSG_SRQ,
3858 "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
3859 wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
3861 for (i = 0; i < wr->num_sge; i++) {
3862 struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
3864 /* Set SGE length, lkey and address */
3865 SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
3866 wr->sg_list[i].length, wr->sg_list[i].lkey);
3868 DP_DEBUG(dev, QEDR_MSG_SRQ,
3869 "[%d]: len %d key %x addr %x:%x\n",
3870 i, srq_sge->length, srq_sge->l_key,
3871 srq_sge->addr.hi, srq_sge->addr.lo);
3875 /* Update WQE and SGE information before
3876 * updating producer.
3880 /* SRQ producer is 8 bytes. Need to update the SGE producer index
3881 * in the first 4 bytes and the WQE producer in the
3882 * next 4 bytes.
3884 srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod);
3885 /* Make sure sge producer is updated first */
3887 srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod);
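	/* Ordering rationale: hardware may fetch the WQE as soon as it
	 * observes the new wqe_prod, so the sge_prod half of the pair must
	 * be globally visible first; hence the barrier between the stores.
	 */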
3892 DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
3893 qed_chain_get_elem_left(pbl));
3894 spin_unlock_irqrestore(&srq->lock, flags);
3899 int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
3900 const struct ib_recv_wr **bad_wr)
3902 struct qedr_qp *qp = get_qedr_qp(ibqp);
3903 struct qedr_dev *dev = qp->dev;
3904 unsigned long flags;
3907 if (qp->qp_type == IB_QPT_GSI)
3908 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3910 spin_lock_irqsave(&qp->q_lock, flags);
3915 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3916 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3917 wr->num_sge > qp->rq.max_sges) {
3918 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3919 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3920 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3926 for (i = 0; i < wr->num_sge; i++) {
3928 struct rdma_rq_sge *rqe =
3929 qed_chain_produce(&qp->rq.pbl);
3931 /* First one must include the number
3932 * of SGE in the list
3935 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3938 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
3939 wr->sg_list[i].lkey);
3941 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3942 wr->sg_list[i].length, flags);
3945 /* Special case of no SGEs. FW requires between 1-4 SGEs,
3946 * so in this case we need to post 1 SGE with length zero. This is
3947 * because an RDMA write with immediate consumes an RQ entry.
3951 struct rdma_rq_sge *rqe =
3952 qed_chain_produce(&qp->rq.pbl);
3954 /* First one must include the number
3955 * of SGE in the list
3957 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
3958 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3960 RQ_SGE_SET(rqe, 0, 0, flags);
3964 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3965 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3967 qedr_inc_sw_prod(&qp->rq);
3969 /* qp->rqe_wr_id is accessed during qedr_poll_cq, as
3970 * soon as we give the doorbell, we could get a completion
3971 * for this wr, therefore we need to make sure that the
3972 * memory is updated before giving the doorbell.
3973 * During qedr_poll_cq, rmb is called before accessing the
3974 * cqe. This covers for the smp_rmb as well.
3978 qp->rq.db_data.data.value++;
3980 writel(qp->rq.db_data.raw, qp->rq.db);
3982 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3983 writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3989 spin_unlock_irqrestore(&qp->q_lock, flags);
3994 static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3996 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3998 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3999 cq->pbl_toggle;
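/* The toggle bit flips each time the producer wraps around the ring, so a
 * CQE left over from the previous lap fails the comparison above and is
 * not mistaken for a new entry.
 */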
4002 static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
4004 struct rdma_cqe_requester *resp_cqe = &cqe->req;
4007 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
4008 resp_cqe->qp_handle.lo,
4013 static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
4015 struct rdma_cqe_requester *resp_cqe = &cqe->req;
4017 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
4020 /* Return latest CQE (needs processing) */
4021 static union rdma_cqe *get_cqe(struct qedr_cq *cq)
4023 return cq->latest_cqe;
4026 /* For FMRs we need to increase the completed counter used by the FMR
4027 * algorithm to determine whether a PBL can be freed or not.
4028 * We need to do this whether the work request was signaled or not. For
4029 * this purpose we call this function from the condition that checks if
4030 * a WR should be skipped, to make sure we don't miss it (possibly this
4031 * FMR operation was not signaled).
4033 static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
4035 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
4036 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4039 static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
4040 struct qedr_cq *cq, int num_entries,
4041 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
4046 while (num_entries && qp->sq.wqe_cons != hw_cons) {
4047 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
4048 qedr_chk_if_fmr(qp);
4054 wc->status = status;
4057 wc->src_qp = qp->id;
4060 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
4061 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
4063 switch (wc->opcode) {
4064 case IB_WC_RDMA_WRITE:
4065 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4067 case IB_WC_COMP_SWAP:
4068 case IB_WC_FETCH_ADD:
4072 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4074 case IB_WC_RDMA_READ:
4076 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4086 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
4087 qed_chain_consume(&qp->sq.pbl);
4088 qedr_inc_sw_cons(&qp->sq);
4094 static int qedr_poll_cq_req(struct qedr_dev *dev,
4095 struct qedr_qp *qp, struct qedr_cq *cq,
4096 int num_entries, struct ib_wc *wc,
4097 struct rdma_cqe_requester *req)
4101 switch (req->status) {
4102 case RDMA_CQE_REQ_STS_OK:
4103 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4106 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
4107 if (qp->state != QED_ROCE_QP_STATE_ERR)
4108 DP_DEBUG(dev, QEDR_MSG_CQ,
4109 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4110 cq->icid, qp->icid);
4111 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4112 IB_WC_WR_FLUSH_ERR, 1);
4115 /* process all WQEs before the consumer */
4116 qp->state = QED_ROCE_QP_STATE_ERR;
4117 cnt = process_req(dev, qp, cq, num_entries, wc,
4118 req->sq_cons - 1, IB_WC_SUCCESS, 0);
4120 /* if we have extra WC fill it with actual error info */
4121 if (cnt < num_entries) {
4122 enum ib_wc_status wc_status;
4124 switch (req->status) {
4125 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
4127 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4128 cq->icid, qp->icid);
4129 wc_status = IB_WC_BAD_RESP_ERR;
4131 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
4133 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4134 cq->icid, qp->icid);
4135 wc_status = IB_WC_LOC_LEN_ERR;
4137 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
4139 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4140 cq->icid, qp->icid);
4141 wc_status = IB_WC_LOC_QP_OP_ERR;
4143 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
4145 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4146 cq->icid, qp->icid);
4147 wc_status = IB_WC_LOC_PROT_ERR;
4149 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
4151 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4152 cq->icid, qp->icid);
4153 wc_status = IB_WC_MW_BIND_ERR;
4155 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
4157 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4158 cq->icid, qp->icid);
4159 wc_status = IB_WC_REM_INV_REQ_ERR;
4161 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
4163 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4164 cq->icid, qp->icid);
4165 wc_status = IB_WC_REM_ACCESS_ERR;
4167 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
4169 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4170 cq->icid, qp->icid);
4171 wc_status = IB_WC_REM_OP_ERR;
4173 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
4175 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4176 cq->icid, qp->icid);
4177 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
4179 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
4181 "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4182 cq->icid, qp->icid);
4183 wc_status = IB_WC_RETRY_EXC_ERR;
4187 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4188 cq->icid, qp->icid);
4189 wc_status = IB_WC_GENERAL_ERR;
4191 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
4199 static inline int qedr_cqe_resp_status_to_ib(u8 status)
4202 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
4203 return IB_WC_LOC_ACCESS_ERR;
4204 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
4205 return IB_WC_LOC_LEN_ERR;
4206 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
4207 return IB_WC_LOC_QP_OP_ERR;
4208 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
4209 return IB_WC_LOC_PROT_ERR;
4210 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
4211 return IB_WC_MW_BIND_ERR;
4212 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
4213 return IB_WC_REM_INV_RD_REQ_ERR;
4214 case RDMA_CQE_RESP_STS_OK:
4215 return IB_WC_SUCCESS;
4217 return IB_WC_GENERAL_ERR;
4221 static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
4224 wc->status = IB_WC_SUCCESS;
4225 wc->byte_len = le32_to_cpu(resp->length);
4227 if (resp->flags & QEDR_RESP_IMM) {
4228 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
4229 wc->wc_flags |= IB_WC_WITH_IMM;
4231 if (resp->flags & QEDR_RESP_RDMA)
4232 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
4234 if (resp->flags & QEDR_RESP_INV)
4237 } else if (resp->flags & QEDR_RESP_INV) {
4238 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
4239 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4241 if (resp->flags & QEDR_RESP_RDMA)
4244 } else if (resp->flags & QEDR_RESP_RDMA) {
4251 static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4252 struct qedr_cq *cq, struct ib_wc *wc,
4253 struct rdma_cqe_responder *resp, u64 wr_id)
4255 /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
4256 wc->opcode = IB_WC_RECV;
4259 if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
4260 if (qedr_set_ok_cqe_resp_wc(resp, wc))
4262 "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
4263 cq, cq->icid, resp->flags);
4266 wc->status = qedr_cqe_resp_status_to_ib(resp->status);
4267 if (wc->status == IB_WC_GENERAL_ERR)
4269 "CQ %p (icid=%d) contains an invalid CQE status %d\n",
4270 cq, cq->icid, resp->status);
4273 /* Fill the rest of the WC */
4275 wc->src_qp = qp->id;
4280 static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4281 struct qedr_cq *cq, struct ib_wc *wc,
4282 struct rdma_cqe_responder *resp)
4284 struct qedr_srq *srq = qp->srq;
4287 wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
4288 le32_to_cpu(resp->srq_wr_id.lo), u64);
4290 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4291 wc->status = IB_WC_WR_FLUSH_ERR;
4295 wc->src_qp = qp->id;
4299 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
4301 atomic_inc(&srq->hw_srq.wr_cons_cnt);
4305 static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4306 struct qedr_cq *cq, struct ib_wc *wc,
4307 struct rdma_cqe_responder *resp)
4309 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4311 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
4313 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4314 qed_chain_consume(&qp->rq.pbl);
4315 qedr_inc_sw_cons(&qp->rq);
4320 static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
4321 int num_entries, struct ib_wc *wc, u16 hw_cons)
4325 while (num_entries && qp->rq.wqe_cons != hw_cons) {
4327 wc->status = IB_WC_WR_FLUSH_ERR;
4330 wc->src_qp = qp->id;
4332 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4337 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4338 qed_chain_consume(&qp->rq.pbl);
4339 qedr_inc_sw_cons(&qp->rq);
4345 static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4346 struct rdma_cqe_responder *resp, int *update)
4348 if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
4354 static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4355 struct qedr_cq *cq, int num_entries,
4357 struct rdma_cqe_responder *resp)
4361 cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
4367 static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
4368 struct qedr_cq *cq, int num_entries,
4369 struct ib_wc *wc, struct rdma_cqe_responder *resp,
4374 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4375 cnt = process_resp_flush(qp, cq, num_entries, wc,
4376 resp->rq_cons_or_srq_id);
4377 try_consume_resp_cqe(cq, qp, resp, update);
4379 cnt = process_resp_one(dev, qp, cq, wc, resp);
4387 static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4388 struct rdma_cqe_requester *req, int *update)
4390 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
4396 int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
4398 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
4399 struct qedr_cq *cq = get_qedr_cq(ibcq);
4400 union rdma_cqe *cqe;
4401 u32 old_cons, new_cons;
4402 unsigned long flags;
4406 if (cq->destroyed) {
4408 "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
4413 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
4414 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
4416 spin_lock_irqsave(&cq->cq_lock, flags);
4417 cqe = cq->latest_cqe;
4418 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4419 while (num_entries && is_valid_cqe(cq, cqe)) {
4423 /* prevent speculative reads of any field of CQE */
4426 qp = cqe_get_qp(cqe);
4428 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
4434 switch (cqe_get_type(cqe)) {
4435 case RDMA_CQE_TYPE_REQUESTER:
4436 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
4438 try_consume_req_cqe(cq, qp, &cqe->req, &update);
4440 case RDMA_CQE_TYPE_RESPONDER_RQ:
4441 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
4442 &cqe->resp, &update);
4444 case RDMA_CQE_TYPE_RESPONDER_SRQ:
4445 cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
4449 case RDMA_CQE_TYPE_INVALID:
4451 DP_ERR(dev, "Error: invalid CQE type = %d\n",
4460 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4462 cq->cq_cons += new_cons - old_cons;
4465 /* The doorbell notifies about the latest VALID entry,
4466 * but the chain already points to the next INVALID one.
4468 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
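	/* e.g. if three CQEs were consumed and cq_cons now indexes the
	 * fourth (still invalid) entry, the doorbell is rung with
	 * cq_cons - 1, the last entry that was actually valid.
	 */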
4470 spin_unlock_irqrestore(&cq->cq_lock, flags);
4474 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
4475 u32 port_num, const struct ib_wc *in_wc,
4476 const struct ib_grh *in_grh, const struct ib_mad *in,
4477 struct ib_mad *out_mad, size_t *out_mad_size,
4478 u16 *out_mad_pkey_index)
4480 return IB_MAD_RESULT_SUCCESS;