RDMA: Constify the argument of the work request conversion functions
linux-2.6-block.git: drivers/infiniband/hw/qedr/verbs.c
1/* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/dma-mapping.h>
33#include <linux/crc32.h>
34#include <net/ip.h>
35#include <net/ipv6.h>
36#include <net/udp.h>
37#include <linux/iommu.h>
38
39#include <rdma/ib_verbs.h>
40#include <rdma/ib_user_verbs.h>
41#include <rdma/iw_cm.h>
42#include <rdma/ib_umem.h>
43#include <rdma/ib_addr.h>
44#include <rdma/ib_cache.h>
45
46#include <linux/qed/common_hsi.h>
47#include "qedr_hsi_rdma.h"
48#include <linux/qed/qed_if.h>
49#include "qedr.h"
50#include "verbs.h"
51#include <rdma/qedr-abi.h>
 52#include "qedr_roce_cm.h"
 53
54#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
55
56static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
57 size_t len)
58{
59 size_t min_len = min_t(size_t, len, udata->outlen);
60
61 return ib_copy_to_udata(udata, src, min_len);
62}
63
64int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
65{
66 if (index > QEDR_ROCE_PKEY_TABLE_LEN)
67 return -EINVAL;
68
69 *pkey = QEDR_ROCE_PKEY_DEFAULT;
70 return 0;
71}
72
73int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
74 int index, union ib_gid *sgid)
75{
76 struct qedr_dev *dev = get_qedr_dev(ibdev);
77
78 memset(sgid->raw, 0, sizeof(sgid->raw));
79 ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
80
81 DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
82 sgid->global.interface_id, sgid->global.subnet_prefix);
83
84 return 0;
85}
86
87int qedr_query_device(struct ib_device *ibdev,
88 struct ib_device_attr *attr, struct ib_udata *udata)
89{
90 struct qedr_dev *dev = get_qedr_dev(ibdev);
91 struct qedr_device_attr *qattr = &dev->attr;
92
93 if (!dev->rdma_ctx) {
94 DP_ERR(dev,
95 "qedr_query_device called with invalid params rdma_ctx=%p\n",
96 dev->rdma_ctx);
97 return -EINVAL;
98 }
99
100 memset(attr, 0, sizeof(*attr));
101
102 attr->fw_ver = qattr->fw_ver;
103 attr->sys_image_guid = qattr->sys_image_guid;
104 attr->max_mr_size = qattr->max_mr_size;
105 attr->page_size_cap = qattr->page_size_caps;
106 attr->vendor_id = qattr->vendor_id;
107 attr->vendor_part_id = qattr->vendor_part_id;
108 attr->hw_ver = qattr->hw_ver;
109 attr->max_qp = qattr->max_qp;
110 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
111 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
112 IB_DEVICE_RC_RNR_NAK_GEN |
113 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
114
115 attr->max_send_sge = qattr->max_sge;
116 attr->max_recv_sge = qattr->max_sge;
117 attr->max_sge_rd = qattr->max_sge;
118 attr->max_cq = qattr->max_cq;
119 attr->max_cqe = qattr->max_cqe;
120 attr->max_mr = qattr->max_mr;
121 attr->max_mw = qattr->max_mw;
122 attr->max_pd = qattr->max_pd;
123 attr->atomic_cap = dev->atomic_cap;
124 attr->max_fmr = qattr->max_fmr;
125 attr->max_map_per_fmr = 16;
126 attr->max_qp_init_rd_atom =
127 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
128 attr->max_qp_rd_atom =
129 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
130 attr->max_qp_init_rd_atom);
131
132 attr->max_srq = qattr->max_srq;
133 attr->max_srq_sge = qattr->max_srq_sge;
134 attr->max_srq_wr = qattr->max_srq_wr;
135
136 attr->local_ca_ack_delay = qattr->dev_ack_delay;
137 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
138 attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
139 attr->max_ah = qattr->max_ah;
140
141 return 0;
142}
143
144#define QEDR_SPEED_SDR (1)
145#define QEDR_SPEED_DDR (2)
146#define QEDR_SPEED_QDR (4)
147#define QEDR_SPEED_FDR10 (8)
148#define QEDR_SPEED_FDR (16)
149#define QEDR_SPEED_EDR (32)
150
151static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
152 u8 *ib_width)
153{
154 switch (speed) {
155 case 1000:
156 *ib_speed = QEDR_SPEED_SDR;
157 *ib_width = IB_WIDTH_1X;
158 break;
159 case 10000:
160 *ib_speed = QEDR_SPEED_QDR;
161 *ib_width = IB_WIDTH_1X;
162 break;
163
164 case 20000:
165 *ib_speed = QEDR_SPEED_DDR;
166 *ib_width = IB_WIDTH_4X;
167 break;
168
169 case 25000:
170 *ib_speed = QEDR_SPEED_EDR;
171 *ib_width = IB_WIDTH_1X;
172 break;
173
174 case 40000:
175 *ib_speed = QEDR_SPEED_QDR;
176 *ib_width = IB_WIDTH_4X;
177 break;
178
179 case 50000:
180 *ib_speed = QEDR_SPEED_QDR;
181 *ib_width = IB_WIDTH_4X;
182 break;
183
184 case 100000:
185 *ib_speed = QEDR_SPEED_EDR;
186 *ib_width = IB_WIDTH_4X;
187 break;
188
189 default:
190 /* Unsupported */
191 *ib_speed = QEDR_SPEED_SDR;
192 *ib_width = IB_WIDTH_1X;
193 }
194}
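/*
 * Editor's note: the mapping above reports the Ethernet rate as an
 * (IB speed, width) pair whose product approximates the link speed.
 * For example:
 *
 *   get_link_speed_and_width(100000, &speed, &width);
 *   leaves speed == QEDR_SPEED_EDR (~25 Gb/s per lane) and
 *   width == IB_WIDTH_4X, i.e. roughly 100 Gb/s.
 */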
195
196int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
197{
198 struct qedr_dev *dev;
199 struct qed_rdma_port *rdma_port;
200
201 dev = get_qedr_dev(ibdev);
202 if (port > 1) {
203 DP_ERR(dev, "invalid_port=0x%x\n", port);
204 return -EINVAL;
205 }
206
207 if (!dev->rdma_ctx) {
208 DP_ERR(dev, "rdma_ctx is NULL\n");
209 return -EINVAL;
210 }
211
212 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
 213
 214 /* *attr is already zeroed by the caller; avoid zeroing it again here */
215 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
216 attr->state = IB_PORT_ACTIVE;
217 attr->phys_state = 5;
218 } else {
219 attr->state = IB_PORT_DOWN;
220 attr->phys_state = 3;
221 }
222 attr->max_mtu = IB_MTU_4096;
223 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
224 attr->lid = 0;
225 attr->lmc = 0;
226 attr->sm_lid = 0;
227 attr->sm_sl = 0;
 228 attr->ip_gids = true;
229 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
230 attr->gid_tbl_len = 1;
231 attr->pkey_tbl_len = 1;
232 } else {
233 attr->gid_tbl_len = QEDR_MAX_SGID;
234 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
235 }
236 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
237 attr->qkey_viol_cntr = 0;
238 get_link_speed_and_width(rdma_port->link_speed,
239 &attr->active_speed, &attr->active_width);
240 attr->max_msg_sz = rdma_port->max_msg_size;
241 attr->max_vl_num = 4;
242
243 return 0;
244}
245
246int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
247 struct ib_port_modify *props)
248{
249 struct qedr_dev *dev;
250
251 dev = get_qedr_dev(ibdev);
252 if (port > 1) {
253 DP_ERR(dev, "invalid_port=0x%x\n", port);
254 return -EINVAL;
255 }
256
257 return 0;
258}
259
260static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
261 unsigned long len)
262{
263 struct qedr_mm *mm;
264
265 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
266 if (!mm)
267 return -ENOMEM;
268
269 mm->key.phy_addr = phy_addr;
270 /* This function might be called with a length which is not a multiple
271 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
272 * forces this granularity by increasing the requested size if needed.
273 * When qedr_mmap is called, it will search the list with the updated
274 * length as a key. To prevent search failures, the length is rounded up
275 * in advance to PAGE_SIZE.
276 */
277 mm->key.len = roundup(len, PAGE_SIZE);
278 INIT_LIST_HEAD(&mm->entry);
279
280 mutex_lock(&uctx->mm_list_lock);
281 list_add(&mm->entry, &uctx->mm_head);
282 mutex_unlock(&uctx->mm_list_lock);
283
284 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
285 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
286 (unsigned long long)mm->key.phy_addr,
287 (unsigned long)mm->key.len, uctx);
288
289 return 0;
290}
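/*
 * Editor's sketch (not part of the driver): the rounding above is what lets a
 * later qedr_mmap() lookup succeed even when userspace registered a length
 * that is not page aligned. For instance, with a 4K PAGE_SIZE:
 *
 *   qedr_add_mmap(uctx, db_pa, 100);      stores key.len = 4096
 *   qedr_search_mmap(uctx, db_pa, 4096);  matches and returns true
 *
 * because the kernel mmap path always hands qedr_mmap() a page-granular
 * length (vm_end - vm_start).
 */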
291
292static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
293 unsigned long len)
294{
295 bool found = false;
296 struct qedr_mm *mm;
297
298 mutex_lock(&uctx->mm_list_lock);
299 list_for_each_entry(mm, &uctx->mm_head, entry) {
300 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
301 continue;
302
303 found = true;
304 break;
305 }
306 mutex_unlock(&uctx->mm_list_lock);
307 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
308 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
309 mm->key.phy_addr, mm->key.len, uctx, found);
310
311 return found;
312}
313
314struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
315 struct ib_udata *udata)
316{
317 int rc;
318 struct qedr_ucontext *ctx;
319 struct qedr_alloc_ucontext_resp uresp;
320 struct qedr_dev *dev = get_qedr_dev(ibdev);
321 struct qed_rdma_add_user_out_params oparams;
322
323 if (!udata)
324 return ERR_PTR(-EFAULT);
325
326 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
327 if (!ctx)
328 return ERR_PTR(-ENOMEM);
329
330 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
331 if (rc) {
332 DP_ERR(dev,
333 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
334 rc);
335 goto err;
336 }
337
338 ctx->dpi = oparams.dpi;
339 ctx->dpi_addr = oparams.dpi_addr;
340 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
341 ctx->dpi_size = oparams.dpi_size;
342 INIT_LIST_HEAD(&ctx->mm_head);
343 mutex_init(&ctx->mm_list_lock);
344
345 memset(&uresp, 0, sizeof(uresp));
346
 347 uresp.dpm_enabled = dev->user_dpm_enabled;
348 uresp.wids_enabled = 1;
349 uresp.wid_count = oparams.wid_count;
350 uresp.db_pa = ctx->dpi_phys_addr;
351 uresp.db_size = ctx->dpi_size;
352 uresp.max_send_wr = dev->attr.max_sqe;
353 uresp.max_recv_wr = dev->attr.max_rqe;
354 uresp.max_srq_wr = dev->attr.max_srq_wr;
355 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
356 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
357 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
358 uresp.max_cqes = QEDR_MAX_CQES;
359
 360 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
361 if (rc)
362 goto err;
363
364 ctx->dev = dev;
365
366 rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
367 if (rc)
368 goto err;
369
370 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
371 &ctx->ibucontext);
372 return &ctx->ibucontext;
373
374err:
375 kfree(ctx);
376 return ERR_PTR(rc);
377}
378
379int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
380{
381 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
382 struct qedr_mm *mm, *tmp;
383 int status = 0;
384
385 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
386 uctx);
387 uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
388
389 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
390 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
391 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
392 mm->key.phy_addr, mm->key.len, uctx);
393 list_del(&mm->entry);
394 kfree(mm);
395 }
396
397 kfree(uctx);
398 return status;
399}
400
401int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
402{
403 struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
404 struct qedr_dev *dev = get_qedr_dev(context->device);
 405 unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT;
 406 unsigned long len = (vma->vm_end - vma->vm_start);
407 unsigned long dpi_start;
408
409 dpi_start = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
410
411 DP_DEBUG(dev, QEDR_MSG_INIT,
412 "mmap invoked with vm_start=0x%pK, vm_end=0x%pK,vm_pgoff=0x%pK; dpi_start=0x%pK dpi_size=0x%x\n",
413 (void *)vma->vm_start, (void *)vma->vm_end,
414 (void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size);
415
416 if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
417 DP_ERR(dev,
 418 "failed mmap, addresses must be page aligned: start=0x%pK, end=0x%pK\n",
 419 (void *)vma->vm_start, (void *)vma->vm_end);
420 return -EINVAL;
421 }
422
423 if (!qedr_search_mmap(ucontext, phys_addr, len)) {
424 DP_ERR(dev, "failed mmap, vm_pgoff=0x%lx is not authorized\n",
425 vma->vm_pgoff);
426 return -EINVAL;
427 }
428
429 if (phys_addr < dpi_start ||
430 ((phys_addr + len) > (dpi_start + ucontext->dpi_size))) {
431 DP_ERR(dev,
432 "failed mmap, pages are outside of dpi; page address=0x%pK, dpi_start=0x%pK, dpi_size=0x%x\n",
433 (void *)phys_addr, (void *)dpi_start,
434 ucontext->dpi_size);
435 return -EINVAL;
436 }
 437
438 if (vma->vm_flags & VM_READ) {
439 DP_ERR(dev, "failed mmap, cannot map doorbell bar for read\n");
440 return -EINVAL;
ac1b36e5 441 }
442
443 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
444 return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
445 vma->vm_page_prot);
 446}
447
448struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
449 struct ib_ucontext *context, struct ib_udata *udata)
450{
451 struct qedr_dev *dev = get_qedr_dev(ibdev);
452 struct qedr_pd *pd;
453 u16 pd_id;
454 int rc;
455
456 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
457 (udata && context) ? "User Lib" : "Kernel");
458
459 if (!dev->rdma_ctx) {
 460 DP_ERR(dev, "invalid RDMA context\n");
461 return ERR_PTR(-EINVAL);
462 }
463
464 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
465 if (!pd)
466 return ERR_PTR(-ENOMEM);
467
468 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
469 if (rc)
470 goto err;
 471
472 pd->pd_id = pd_id;
473
474 if (udata && context) {
475 struct qedr_alloc_pd_uresp uresp = {
476 .pd_id = pd_id,
477 };
 478
 479 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 480 if (rc) {
 481 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
482 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
483 goto err;
484 }
485
486 pd->uctx = get_qedr_ucontext(context);
487 pd->uctx->pd = pd;
488 }
489
490 return &pd->ibpd;
491
492err:
493 kfree(pd);
494 return ERR_PTR(rc);
495}
496
497int qedr_dealloc_pd(struct ib_pd *ibpd)
498{
499 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
500 struct qedr_pd *pd = get_qedr_pd(ibpd);
501
 502 if (!pd) {
 503 pr_err("Invalid PD received in dealloc_pd\n");
504 return -EINVAL;
505 }
506
507 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
508 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
509
510 kfree(pd);
511
512 return 0;
513}
514
515static void qedr_free_pbl(struct qedr_dev *dev,
516 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
517{
518 struct pci_dev *pdev = dev->pdev;
519 int i;
520
521 for (i = 0; i < pbl_info->num_pbls; i++) {
522 if (!pbl[i].va)
523 continue;
524 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
525 pbl[i].va, pbl[i].pa);
526 }
527
528 kfree(pbl);
529}
530
531#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
532#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
533
534#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
535#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
536#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
537
538static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
539 struct qedr_pbl_info *pbl_info,
540 gfp_t flags)
541{
542 struct pci_dev *pdev = dev->pdev;
543 struct qedr_pbl *pbl_table;
544 dma_addr_t *pbl_main_tbl;
545 dma_addr_t pa;
546 void *va;
547 int i;
548
549 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
550 if (!pbl_table)
551 return ERR_PTR(-ENOMEM);
552
553 for (i = 0; i < pbl_info->num_pbls; i++) {
554 va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
555 &pa, flags);
556 if (!va)
557 goto err;
558
559 pbl_table[i].va = va;
560 pbl_table[i].pa = pa;
561 }
562
563 /* Two-Layer PBLs, if we have more than one pbl we need to initialize
564 * the first one with physical pointers to all of the rest
565 */
566 pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
567 for (i = 0; i < pbl_info->num_pbls - 1; i++)
568 pbl_main_tbl[i] = pbl_table[i + 1].pa;
569
570 return pbl_table;
571
572err:
573 for (i--; i >= 0; i--)
574 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
575 pbl_table[i].va, pbl_table[i].pa);
576
577 qedr_free_pbl(dev, pbl_info, pbl_table);
578
579 return ERR_PTR(-ENOMEM);
580}
581
582static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
583 struct qedr_pbl_info *pbl_info,
584 u32 num_pbes, int two_layer_capable)
585{
586 u32 pbl_capacity;
587 u32 pbl_size;
588 u32 num_pbls;
589
590 if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
591 if (num_pbes > MAX_PBES_TWO_LAYER) {
592 DP_ERR(dev, "prepare pbl table: too many pages %d\n",
593 num_pbes);
594 return -EINVAL;
595 }
596
597 /* calculate required pbl page size */
598 pbl_size = MIN_FW_PBL_PAGE_SIZE;
599 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
600 NUM_PBES_ON_PAGE(pbl_size);
601
602 while (pbl_capacity < num_pbes) {
603 pbl_size *= 2;
604 pbl_capacity = pbl_size / sizeof(u64);
605 pbl_capacity = pbl_capacity * pbl_capacity;
606 }
607
608 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
 609 num_pbls++; /* One for layer 0 (it points to the other PBLs) */
610 pbl_info->two_layered = true;
611 } else {
612 /* One layered PBL */
613 num_pbls = 1;
614 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
615 roundup_pow_of_two((num_pbes * sizeof(u64))));
616 pbl_info->two_layered = false;
617 }
618
619 pbl_info->num_pbls = num_pbls;
620 pbl_info->pbl_size = pbl_size;
621 pbl_info->num_pbes = num_pbes;
622
623 DP_DEBUG(dev, QEDR_MSG_MR,
624 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
625 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
626
627 return 0;
628}
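/*
 * Editor's worked example, using the constants defined above: one 4K PBL page
 * holds 4096 / 8 = 512 PBEs, and a 64K page holds 8192 (MAX_PBES_ON_PAGE).
 * Two layers therefore cover NUM_PBES_ON_PAGE(pbl_size)^2 entries; with the
 * minimum 4K PBL page that is 512 * 512 = 262144 PBEs, i.e. 1 GiB of 4K
 * firmware pages before the loop doubles pbl_size.
 */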
629
630static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
631 struct qedr_pbl *pbl,
 632 struct qedr_pbl_info *pbl_info, u32 pg_shift)
633{
634 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
 635 u32 fw_pg_cnt, fw_pg_per_umem_pg;
636 struct qedr_pbl *pbl_tbl;
637 struct scatterlist *sg;
638 struct regpair *pbe;
 639 u64 pg_addr;
 640 int entry;
641
642 if (!pbl_info->num_pbes)
643 return;
644
645 /* If we have a two layered pbl, the first pbl points to the rest
646 * of the pbls and the first entry lays on the second pbl in the table
647 */
648 if (pbl_info->two_layered)
649 pbl_tbl = &pbl[1];
650 else
651 pbl_tbl = pbl;
652
653 pbe = (struct regpair *)pbl_tbl->va;
654 if (!pbe) {
655 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
656 return;
657 }
658
659 pbe_cnt = 0;
660
 661 shift = umem->page_shift;
 662
663 fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
664
665 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
666 pages = sg_dma_len(sg) >> shift;
 667 pg_addr = sg_dma_address(sg);
 668 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
669 for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
670 pbe->lo = cpu_to_le32(pg_addr);
671 pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
672
673 pg_addr += BIT(pg_shift);
674 pbe_cnt++;
675 total_num_pbes++;
676 pbe++;
677
678 if (total_num_pbes == pbl_info->num_pbes)
679 return;
680
681 /* If the given pbl is full storing the pbes,
682 * move to next pbl.
683 */
684 if (pbe_cnt ==
685 (pbl_info->pbl_size / sizeof(u64))) {
686 pbl_tbl++;
687 pbe = (struct regpair *)pbl_tbl->va;
688 pbe_cnt = 0;
689 }
690
691 fw_pg_cnt++;
692 }
693 }
694 }
695}
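/*
 * Editor's note: fw_pg_per_umem_pg above splits each umem page into FW-sized
 * pages. For example, if the memory was pinned with 2M pages
 * (umem->page_shift = 21) and the firmware works on 4K pages (pg_shift = 12),
 * every umem page expands into BIT(21 - 12) = 512 consecutive PBEs, each
 * advanced by BIT(pg_shift) = 4K.
 */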
696
697static int qedr_copy_cq_uresp(struct qedr_dev *dev,
698 struct qedr_cq *cq, struct ib_udata *udata)
699{
700 struct qedr_create_cq_uresp uresp;
701 int rc;
702
703 memset(&uresp, 0, sizeof(uresp));
704
705 uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
706 uresp.icid = cq->icid;
707
 708 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
709 if (rc)
710 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
711
712 return rc;
713}
714
715static void consume_cqe(struct qedr_cq *cq)
716{
717 if (cq->latest_cqe == cq->toggle_cqe)
718 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
719
720 cq->latest_cqe = qed_chain_consume(&cq->pbl);
721}
722
723static inline int qedr_align_cq_entries(int entries)
724{
725 u64 size, aligned_size;
726
727 /* We allocate an extra entry that we don't report to the FW. */
728 size = (entries + 1) * QEDR_CQE_SIZE;
729 aligned_size = ALIGN(size, PAGE_SIZE);
730
731 return aligned_size / QEDR_CQE_SIZE;
732}
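/*
 * Editor's worked example (QEDR_CQE_SIZE is defined in qedr.h; 32 bytes is
 * assumed here purely for illustration): a request for 256 entries becomes
 * (256 + 1) * 32 = 8224 bytes, which ALIGN() rounds up to 12288 bytes (three
 * 4K pages), so the function reports 12288 / 32 = 384 usable CQEs.
 */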
733
734static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
735 struct qedr_dev *dev,
736 struct qedr_userq *q,
737 u64 buf_addr, size_t buf_len,
738 int access, int dmasync,
739 int alloc_and_init)
 740{
 741 u32 fw_pages;
742 int rc;
743
744 q->buf_addr = buf_addr;
745 q->buf_len = buf_len;
746 q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
747 if (IS_ERR(q->umem)) {
748 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
749 PTR_ERR(q->umem));
750 return PTR_ERR(q->umem);
751 }
752
753 fw_pages = ib_umem_page_count(q->umem) <<
754 (q->umem->page_shift - FW_PAGE_SHIFT);
755
756 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
757 if (rc)
758 goto err0;
759
760 if (alloc_and_init) {
761 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
762 if (IS_ERR(q->pbl_tbl)) {
763 rc = PTR_ERR(q->pbl_tbl);
764 goto err0;
765 }
766 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
767 FW_PAGE_SHIFT);
768 } else {
769 q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
770 if (!q->pbl_tbl) {
771 rc = -ENOMEM;
 772 goto err0;
 773 }
 774 }
775
776 return 0;
777
778err0:
779 ib_umem_release(q->umem);
 780 q->umem = NULL;
781
782 return rc;
783}
784
785static inline void qedr_init_cq_params(struct qedr_cq *cq,
786 struct qedr_ucontext *ctx,
787 struct qedr_dev *dev, int vector,
788 int chain_entries, int page_cnt,
789 u64 pbl_ptr,
790 struct qed_rdma_create_cq_in_params
791 *params)
792{
793 memset(params, 0, sizeof(*params));
794 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
795 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
796 params->cnq_id = vector;
797 params->cq_size = chain_entries - 1;
798 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
799 params->pbl_num_pages = page_cnt;
800 params->pbl_ptr = pbl_ptr;
801 params->pbl_two_level = 0;
802}
803
804static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
805{
806 cq->db.data.agg_flags = flags;
807 cq->db.data.value = cpu_to_le32(cons);
808 writeq(cq->db.raw, cq->db_addr);
809
810 /* Make sure write would stick */
811 mmiowb();
812}
813
814int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
815{
816 struct qedr_cq *cq = get_qedr_cq(ibcq);
817 unsigned long sflags;
818 struct qedr_dev *dev;
819
820 dev = get_qedr_dev(ibcq->device);
821
822 if (cq->destroyed) {
823 DP_ERR(dev,
824 "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
825 cq, cq->icid);
826 return -EINVAL;
827 }
828
829
830 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
831 return 0;
832
833 spin_lock_irqsave(&cq->cq_lock, sflags);
834
835 cq->arm_flags = 0;
836
837 if (flags & IB_CQ_SOLICITED)
838 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
839
840 if (flags & IB_CQ_NEXT_COMP)
841 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
842
843 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
844
845 spin_unlock_irqrestore(&cq->cq_lock, sflags);
846
847 return 0;
848}
849
850struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
851 const struct ib_cq_init_attr *attr,
852 struct ib_ucontext *ib_ctx, struct ib_udata *udata)
853{
854 struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
855 struct qed_rdma_destroy_cq_out_params destroy_oparams;
856 struct qed_rdma_destroy_cq_in_params destroy_iparams;
857 struct qedr_dev *dev = get_qedr_dev(ibdev);
858 struct qed_rdma_create_cq_in_params params;
859 struct qedr_create_cq_ureq ureq;
860 int vector = attr->comp_vector;
861 int entries = attr->cqe;
862 struct qedr_cq *cq;
863 int chain_entries;
864 int page_cnt;
865 u64 pbl_ptr;
866 u16 icid;
867 int rc;
868
869 DP_DEBUG(dev, QEDR_MSG_INIT,
870 "create_cq: called from %s. entries=%d, vector=%d\n",
871 udata ? "User Lib" : "Kernel", entries, vector);
872
873 if (entries > QEDR_MAX_CQES) {
874 DP_ERR(dev,
875 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
876 entries, QEDR_MAX_CQES);
877 return ERR_PTR(-EINVAL);
878 }
879
880 chain_entries = qedr_align_cq_entries(entries);
881 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
882
883 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
884 if (!cq)
885 return ERR_PTR(-ENOMEM);
886
887 if (udata) {
888 memset(&ureq, 0, sizeof(ureq));
889 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
890 DP_ERR(dev,
891 "create cq: problem copying data from user space\n");
892 goto err0;
893 }
894
895 if (!ureq.len) {
896 DP_ERR(dev,
897 "create cq: cannot create a cq with 0 entries\n");
898 goto err0;
899 }
900
901 cq->cq_type = QEDR_CQ_TYPE_USER;
902
903 rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
904 ureq.len, IB_ACCESS_LOCAL_WRITE,
905 1, 1);
906 if (rc)
907 goto err0;
908
909 pbl_ptr = cq->q.pbl_tbl->pa;
910 page_cnt = cq->q.pbl_info.num_pbes;
911
912 cq->ibcq.cqe = chain_entries;
913 } else {
914 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
915
916 rc = dev->ops->common->chain_alloc(dev->cdev,
917 QED_CHAIN_USE_TO_CONSUME,
918 QED_CHAIN_MODE_PBL,
919 QED_CHAIN_CNT_TYPE_U32,
920 chain_entries,
921 sizeof(union rdma_cqe),
 922 &cq->pbl, NULL);
923 if (rc)
924 goto err1;
925
926 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
927 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
 928 cq->ibcq.cqe = cq->pbl.capacity;
929 }
930
931 qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
932 pbl_ptr, &params);
933
934 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
935 if (rc)
936 goto err2;
937
938 cq->icid = icid;
939 cq->sig = QEDR_CQ_MAGIC_NUMBER;
940 spin_lock_init(&cq->cq_lock);
941
942 if (ib_ctx) {
943 rc = qedr_copy_cq_uresp(dev, cq, udata);
944 if (rc)
945 goto err3;
946 } else {
947 /* Generate doorbell address. */
948 cq->db_addr = dev->db_addr +
949 DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
950 cq->db.data.icid = cq->icid;
951 cq->db.data.params = DB_AGG_CMD_SET <<
952 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
953
954 /* point to the very last element, passing it we will toggle */
955 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
956 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
957 cq->latest_cqe = NULL;
958 consume_cqe(cq);
959 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
960 }
961
962 DP_DEBUG(dev, QEDR_MSG_CQ,
963 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
964 cq->icid, cq, params.cq_size);
965
966 return &cq->ibcq;
967
968err3:
969 destroy_iparams.icid = cq->icid;
970 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
971 &destroy_oparams);
972err2:
973 if (udata)
974 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
975 else
976 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
977err1:
978 if (udata)
979 ib_umem_release(cq->q.umem);
980err0:
981 kfree(cq);
982 return ERR_PTR(-EINVAL);
983}
984
985int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
986{
987 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
988 struct qedr_cq *cq = get_qedr_cq(ibcq);
989
990 DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
991
992 return 0;
993}
994
995#define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
996#define QEDR_DESTROY_CQ_ITER_DURATION (10)
997
998int qedr_destroy_cq(struct ib_cq *ibcq)
999{
1000 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1001 struct qed_rdma_destroy_cq_out_params oparams;
1002 struct qed_rdma_destroy_cq_in_params iparams;
1003 struct qedr_cq *cq = get_qedr_cq(ibcq);
 1004 int iter;
 1005 int rc;
 1006
 1007 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
 1008
1009 cq->destroyed = 1;
1010
a7efd777 1011 /* GSIs CQs are handled by driver, so they don't exist in the FW */
942b3b2c
AR
1012 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
1013 goto done;
 1014
1015 iparams.icid = cq->icid;
1016 rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1017 if (rc)
1018 return rc;
1019
1020 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1021
1022 if (ibcq->uobject && ibcq->uobject->context) {
1023 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1024 ib_umem_release(cq->q.umem);
1025 }
1026
1027 /* We don't want the IRQ handler to handle a non-existing CQ so we
1028 * wait until all CNQ interrupts, if any, are received. This will always
1029 * happen and will always happen very fast. If not, then a serious error
 1030 * has occurred. That is why we can use a long delay.
 1031 * We spin for a short time so we don't lose time on context switching
1032 * in case all the completions are handled in that span. Otherwise
1033 * we sleep for a while and check again. Since the CNQ may be
1034 * associated with (only) the current CPU we use msleep to allow the
1035 * current CPU to be freed.
1036 * The CNQ notification is increased in qedr_irq_handler().
1037 */
1038 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1039 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1040 udelay(QEDR_DESTROY_CQ_ITER_DURATION);
1041 iter--;
1042 }
1043
1044 iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1045 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1046 msleep(QEDR_DESTROY_CQ_ITER_DURATION);
1047 iter--;
1048 }
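/*
 * Editor's note: with the constants above this bounds the wait at roughly
 * 10 * 10us of busy-waiting plus 10 * 10ms of sleeping, i.e. a little over
 * 100ms before the destroy gives up and reports the error below.
 */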
1049
1050 if (oparams.num_cq_notif != cq->cnq_notif)
1051 goto err;
1052
1053 /* Note that we don't need to have explicit code to wait for the
1054 * completion of the event handler because it is invoked from the EQ.
1055 * Since the destroy CQ ramrod has also been received on the EQ we can
1056 * be certain that there's no event handler in process.
1057 */
 1058done:
1059 cq->sig = ~cq->sig;
1060
1061 kfree(cq);
1062
1063 return 0;
1064
1065err:
1066 DP_ERR(dev,
1067 "CQ %p (icid=%d) not freed, expecting %d ints but got %d ints\n",
1068 cq, cq->icid, oparams.num_cq_notif, cq->cnq_notif);
1069
1070 return -EINVAL;
 1071}
1072
1073static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1074 struct ib_qp_attr *attr,
1075 int attr_mask,
1076 struct qed_rdma_modify_qp_in_params
1077 *qp_params)
1078{
 1079 const struct ib_gid_attr *gid_attr;
 1080 enum rdma_network_type nw_type;
 1081 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
 1082 u32 ipv4_addr;
1083 int i;
1084
1085 gid_attr = grh->sgid_attr;
1086 qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr->ndev);
 1087
 1088 nw_type = rdma_gid_attr_network_type(gid_attr);
1089 switch (nw_type) {
1090 case RDMA_NETWORK_IPV6:
 1091 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1092 sizeof(qp_params->sgid));
1093 memcpy(&qp_params->dgid.bytes[0],
1094 &grh->dgid,
1095 sizeof(qp_params->dgid));
1096 qp_params->roce_mode = ROCE_V2_IPV6;
1097 SET_FIELD(qp_params->modify_flags,
1098 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1099 break;
1100 case RDMA_NETWORK_IB:
 1101 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1102 sizeof(qp_params->sgid));
1103 memcpy(&qp_params->dgid.bytes[0],
1104 &grh->dgid,
1105 sizeof(qp_params->dgid));
1106 qp_params->roce_mode = ROCE_V1;
1107 break;
1108 case RDMA_NETWORK_IPV4:
1109 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1110 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
 1111 ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
1112 qp_params->sgid.ipv4_addr = ipv4_addr;
1113 ipv4_addr =
1114 qedr_get_ipv4_from_gid(grh->dgid.raw);
1115 qp_params->dgid.ipv4_addr = ipv4_addr;
1116 SET_FIELD(qp_params->modify_flags,
1117 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1118 qp_params->roce_mode = ROCE_V2_IPV4;
1119 break;
1120 }
1121
1122 for (i = 0; i < 4; i++) {
1123 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1124 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1125 }
1126
1127 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1128 qp_params->vlan_id = 0;
1129
1130 return 0;
1131}
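/*
 * Editor's note: for RDMA_NETWORK_IPV4 the GID is an IPv4-mapped IPv6
 * address (::ffff:a.b.c.d), so qedr_get_ipv4_from_gid() only needs the last
 * four bytes of the raw GID for both the source and destination addresses;
 * the full 128-bit sgid/dgid fields are deliberately zeroed first.
 */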
1132
1133static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1134 struct ib_qp_init_attr *attrs)
1135{
1136 struct qedr_device_attr *qattr = &dev->attr;
1137
1138 /* QP0... attrs->qp_type == IB_QPT_GSI */
1139 if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1140 DP_DEBUG(dev, QEDR_MSG_QP,
1141 "create qp: unsupported qp type=0x%x requested\n",
1142 attrs->qp_type);
1143 return -EINVAL;
1144 }
1145
1146 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1147 DP_ERR(dev,
1148 "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1149 attrs->cap.max_send_wr, qattr->max_sqe);
1150 return -EINVAL;
1151 }
1152
1153 if (attrs->cap.max_inline_data > qattr->max_inline) {
1154 DP_ERR(dev,
1155 "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1156 attrs->cap.max_inline_data, qattr->max_inline);
1157 return -EINVAL;
1158 }
1159
1160 if (attrs->cap.max_send_sge > qattr->max_sge) {
1161 DP_ERR(dev,
1162 "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1163 attrs->cap.max_send_sge, qattr->max_sge);
1164 return -EINVAL;
1165 }
1166
1167 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1168 DP_ERR(dev,
1169 "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1170 attrs->cap.max_recv_sge, qattr->max_sge);
1171 return -EINVAL;
1172 }
1173
1174 /* Unprivileged user space cannot create special QP */
1175 if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1176 DP_ERR(dev,
1177 "create qp: userspace can't create special QPs of type=0x%x\n",
1178 attrs->qp_type);
1179 return -EINVAL;
1180 }
1181
1182 return 0;
1183}
1184
1185static void qedr_copy_rq_uresp(struct qedr_dev *dev,
1186 struct qedr_create_qp_uresp *uresp,
1187 struct qedr_qp *qp)
1188{
1189 /* iWARP requires two doorbells per RQ. */
1190 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1191 uresp->rq_db_offset =
1192 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1193 uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1194 } else {
1195 uresp->rq_db_offset =
1196 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1197 }
1198
1199 uresp->rq_icid = qp->icid;
1200}
1201
1202static void qedr_copy_sq_uresp(struct qedr_dev *dev,
1203 struct qedr_create_qp_uresp *uresp,
1204 struct qedr_qp *qp)
1205{
1206 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1207
1208 /* iWARP uses the same cid for rq and sq */
1209 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1210 uresp->sq_icid = qp->icid;
1211 else
1212 uresp->sq_icid = qp->icid + 1;
1213}
1214
1215static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1216 struct qedr_qp *qp, struct ib_udata *udata)
1217{
1218 struct qedr_create_qp_uresp uresp;
1219 int rc;
1220
1221 memset(&uresp, 0, sizeof(uresp));
1222 qedr_copy_sq_uresp(dev, &uresp, qp);
1223 qedr_copy_rq_uresp(dev, &uresp, qp);
1224
1225 uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1226 uresp.qp_id = qp->qp_id;
1227
 1228 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1229 if (rc)
1230 DP_ERR(dev,
1231 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1232 qp->icid);
1233
1234 return rc;
1235}
1236
1237static void qedr_set_common_qp_params(struct qedr_dev *dev,
1238 struct qedr_qp *qp,
1239 struct qedr_pd *pd,
1240 struct ib_qp_init_attr *attrs)
 1241{
 1242 spin_lock_init(&qp->q_lock);
 1243 atomic_set(&qp->refcnt, 1);
 1244 qp->pd = pd;
1245 qp->qp_type = attrs->qp_type;
1246 qp->max_inline_data = attrs->cap.max_inline_data;
1247 qp->sq.max_sges = attrs->cap.max_send_sge;
1248 qp->state = QED_ROCE_QP_STATE_RESET;
1249 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1250 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1251 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1252 qp->dev = dev;
 1253 qp->rq.max_sges = attrs->cap.max_recv_sge;
 1254
1255 DP_DEBUG(dev, QEDR_MSG_QP,
1256 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1257 qp->rq.max_sges, qp->rq_cq->icid);
1258 DP_DEBUG(dev, QEDR_MSG_QP,
1259 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1260 pd->pd_id, qp->qp_type, qp->max_inline_data,
1261 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1262 DP_DEBUG(dev, QEDR_MSG_QP,
1263 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1264 qp->sq.max_sges, qp->sq_cq->icid);
1265}
1266
 1267static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1268{
1269 qp->sq.db = dev->db_addr +
1270 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1271 qp->sq.db_data.data.icid = qp->icid + 1;
1272 qp->rq.db = dev->db_addr +
1273 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1274 qp->rq.db_data.data.icid = qp->icid;
1275}
1276
1277static inline void
1278qedr_init_common_qp_in_params(struct qedr_dev *dev,
1279 struct qedr_pd *pd,
1280 struct qedr_qp *qp,
1281 struct ib_qp_init_attr *attrs,
1282 bool fmr_and_reserved_lkey,
1283 struct qed_rdma_create_qp_in_params *params)
 1284{
1285 /* QP handle to be written in an async event */
1286 params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1287 params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
1288
1289 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1290 params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1291 params->pd = pd->pd_id;
1292 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1293 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1294 params->stats_queue = 0;
1295 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1296 params->srq_id = 0;
1297 params->use_srq = false;
1298}
1299
 1300static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
 1301{
1302 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1303 "qp=%p. "
1304 "sq_addr=0x%llx, "
1305 "sq_len=%zd, "
1306 "rq_addr=0x%llx, "
1307 "rq_len=%zd"
1308 "\n",
1309 qp,
1310 qp->usq.buf_addr,
1311 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
1312}
 1313
1314static int qedr_idr_add(struct qedr_dev *dev, void *ptr, u32 id)
1315{
1316 int rc;
1317
1318 if (!rdma_protocol_iwarp(&dev->ibdev, 1))
1319 return 0;
1320
1321 idr_preload(GFP_KERNEL);
1322 spin_lock_irq(&dev->idr_lock);
1323
1324 rc = idr_alloc(&dev->qpidr, ptr, id, id + 1, GFP_ATOMIC);
1325
1326 spin_unlock_irq(&dev->idr_lock);
1327 idr_preload_end();
1328
1329 return rc < 0 ? rc : 0;
1330}
1331
1332static void qedr_idr_remove(struct qedr_dev *dev, u32 id)
1333{
1334 if (!rdma_protocol_iwarp(&dev->ibdev, 1))
1335 return;
1336
1337 spin_lock_irq(&dev->idr_lock);
1338 idr_remove(&dev->qpidr, id);
1339 spin_unlock_irq(&dev->idr_lock);
1340}
1341
1342static inline void
1343qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
1344 struct qedr_qp *qp,
1345 struct qed_rdma_create_qp_out_params *out_params)
1346{
1347 qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1348 qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1349
1350 qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1351 &qp->usq.pbl_info, FW_PAGE_SHIFT);
1352
1353 qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1354 qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1355
1356 qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1357 &qp->urq.pbl_info, FW_PAGE_SHIFT);
1358}
1359
1360static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
1361{
1362 if (qp->usq.umem)
1363 ib_umem_release(qp->usq.umem);
1364 qp->usq.umem = NULL;
 1365
1366 if (qp->urq.umem)
1367 ib_umem_release(qp->urq.umem);
1368 qp->urq.umem = NULL;
1369}
1370
1371static int qedr_create_user_qp(struct qedr_dev *dev,
1372 struct qedr_qp *qp,
1373 struct ib_pd *ibpd,
1374 struct ib_udata *udata,
1375 struct ib_qp_init_attr *attrs)
 1376{
1377 struct qed_rdma_create_qp_in_params in_params;
1378 struct qed_rdma_create_qp_out_params out_params;
1379 struct qedr_pd *pd = get_qedr_pd(ibpd);
1380 struct ib_ucontext *ib_ctx = NULL;
 1381 struct qedr_create_qp_ureq ureq;
 1382 int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
 1383 int rc = -EINVAL;
 1384
 1385 ib_ctx = ibpd->uobject->context;
 1386
1387 memset(&ureq, 0, sizeof(ureq));
1388 rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
1389 if (rc) {
1390 DP_ERR(dev, "Problem copying data from user space\n");
1391 return rc;
1392 }
 1393
1394 /* SQ - read access only (0), dma sync not required (0) */
1395 rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
 1396 ureq.sq_len, 0, 0, alloc_and_init);
1397 if (rc)
1398 return rc;
 1399
1400 /* RQ - read access only (0), dma sync not required (0) */
1401 rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
 1402 ureq.rq_len, 0, 0, alloc_and_init);
1403 if (rc)
1404 return rc;
1405
1406 memset(&in_params, 0, sizeof(in_params));
1407 qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1408 in_params.qp_handle_lo = ureq.qp_handle_lo;
1409 in_params.qp_handle_hi = ureq.qp_handle_hi;
1410 in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1411 in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1412 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1413 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1414
1415 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1416 &in_params, &out_params);
1417
1418 if (!qp->qed_qp) {
1419 rc = -ENOMEM;
1420 goto err1;
1421 }
1422
1423 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1424 qedr_iwarp_populate_user_qp(dev, qp, &out_params);
1425
1426 qp->qp_id = out_params.qp_id;
1427 qp->icid = out_params.icid;
1428
1429 rc = qedr_copy_qp_uresp(dev, qp, udata);
1430 if (rc)
1431 goto err;
1432
1433 qedr_qp_user_print(dev, qp);
1434
1435 return 0;
1436err:
1437 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1438 if (rc)
1439 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1440
1441err1:
1442 qedr_cleanup_user(dev, qp);
1443 return rc;
1444}
1445
1446static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1447{
1448 qp->sq.db = dev->db_addr +
1449 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1450 qp->sq.db_data.data.icid = qp->icid;
1451
1452 qp->rq.db = dev->db_addr +
1453 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1454 qp->rq.db_data.data.icid = qp->icid;
1455 qp->rq.iwarp_db2 = dev->db_addr +
1456 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1457 qp->rq.iwarp_db2_data.data.icid = qp->icid;
1458 qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
1459}
1460
1461static int
1462qedr_roce_create_kernel_qp(struct qedr_dev *dev,
1463 struct qedr_qp *qp,
1464 struct qed_rdma_create_qp_in_params *in_params,
1465 u32 n_sq_elems, u32 n_rq_elems)
 1466{
 1467 struct qed_rdma_create_qp_out_params out_params;
1468 int rc;
1469
1470 rc = dev->ops->common->chain_alloc(dev->cdev,
1471 QED_CHAIN_USE_TO_PRODUCE,
1472 QED_CHAIN_MODE_PBL,
1473 QED_CHAIN_CNT_TYPE_U32,
1474 n_sq_elems,
1475 QEDR_SQE_ELEMENT_SIZE,
 1476 &qp->sq.pbl, NULL);
 1477
1478 if (rc)
1479 return rc;
 1480
1481 in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1482 in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
 1483
1484 rc = dev->ops->common->chain_alloc(dev->cdev,
1485 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1486 QED_CHAIN_MODE_PBL,
1487 QED_CHAIN_CNT_TYPE_U32,
1488 n_rq_elems,
1489 QEDR_RQE_ELEMENT_SIZE,
 1490 &qp->rq.pbl, NULL);
1491 if (rc)
1492 return rc;
 1493
1494 in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1495 in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
 1496
1497 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1498 in_params, &out_params);
 1499
1500 if (!qp->qed_qp)
1501 return -EINVAL;
 1502
1503 qp->qp_id = out_params.qp_id;
1504 qp->icid = out_params.icid;
 1505
 1506 qedr_set_roce_db_info(dev, qp);
1507 return rc;
1508}
 1509
1510static int
1511qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
1512 struct qedr_qp *qp,
1513 struct qed_rdma_create_qp_in_params *in_params,
1514 u32 n_sq_elems, u32 n_rq_elems)
1515{
1516 struct qed_rdma_create_qp_out_params out_params;
1517 struct qed_chain_ext_pbl ext_pbl;
1518 int rc;
1519
1520 in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
1521 QEDR_SQE_ELEMENT_SIZE,
1522 QED_CHAIN_MODE_PBL);
1523 in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
1524 QEDR_RQE_ELEMENT_SIZE,
1525 QED_CHAIN_MODE_PBL);
1526
1527 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1528 in_params, &out_params);
1529
1530 if (!qp->qed_qp)
1531 return -EINVAL;
1532
1533 /* Now we allocate the chain */
1534 ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
1535 ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
1536
1537 rc = dev->ops->common->chain_alloc(dev->cdev,
1538 QED_CHAIN_USE_TO_PRODUCE,
1539 QED_CHAIN_MODE_PBL,
1540 QED_CHAIN_CNT_TYPE_U32,
1541 n_sq_elems,
1542 QEDR_SQE_ELEMENT_SIZE,
1543 &qp->sq.pbl, &ext_pbl);
1544
1545 if (rc)
1546 goto err;
1547
1548 ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
1549 ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
1550
1551 rc = dev->ops->common->chain_alloc(dev->cdev,
1552 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1553 QED_CHAIN_MODE_PBL,
1554 QED_CHAIN_CNT_TYPE_U32,
1555 n_rq_elems,
1556 QEDR_RQE_ELEMENT_SIZE,
1557 &qp->rq.pbl, &ext_pbl);
1558
1559 if (rc)
1560 goto err;
1561
1562 qp->qp_id = out_params.qp_id;
1563 qp->icid = out_params.icid;
1564
1565 qedr_set_iwarp_db_info(dev, qp);
1566 return rc;
1567
1568err:
1569 dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1570
1571 return rc;
1572}
1573
 1574static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
 1575{
1576 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1577 kfree(qp->wqe_wr_id);
 1578
1579 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1580 kfree(qp->rqe_wr_id);
1581}
1582
1583static int qedr_create_kernel_qp(struct qedr_dev *dev,
1584 struct qedr_qp *qp,
1585 struct ib_pd *ibpd,
1586 struct ib_qp_init_attr *attrs)
 1587{
1588 struct qed_rdma_create_qp_in_params in_params;
1589 struct qedr_pd *pd = get_qedr_pd(ibpd);
1590 int rc = -EINVAL;
1591 u32 n_rq_elems;
1592 u32 n_sq_elems;
1593 u32 n_sq_entries;
 1594
 1595 memset(&in_params, 0, sizeof(in_params));
 1596
1597 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1598 * the ring. The ring should allow at least a single WR, even if the
1599 * user requested none, due to allocation issues.
1600 * We should add an extra WR since the prod and cons indices of
1601 * wqe_wr_id are managed in such a way that the WQ is considered full
1602 * when (prod+1)%max_wr==cons. We currently don't do that because we
 1603 * double the number of entries due to an iSER issue that pushes far more
1604 * WRs than indicated. If we decline its ib_post_send() then we get
1605 * error prints in the dmesg we'd like to avoid.
1606 */
1607 qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
1608 dev->attr.max_sqe);
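/*
 * Editor's example (dev->wq_multiplier is set at init time; the values here
 * are purely illustrative): with max_send_wr = 100 and a multiplier of 2,
 * sq.max_wr becomes min(200, dev->attr.max_sqe), i.e. the SQ shadow array is
 * deliberately over-provisioned as the comment above explains.
 */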
 1609
 1610 qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
1611 GFP_KERNEL);
1612 if (!qp->wqe_wr_id) {
1613 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
1614 return -ENOMEM;
1615 }
 1616
1617 /* QP handle to be written in CQE */
1618 in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
1619 in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
 1620
1621 /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
 1622 * the ring. The ring should allow at least a single WR, even if the
1623 * user requested none, due to allocation issues.
1624 */
1625 qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
 1626
 1627 /* Allocate driver internal RQ array */
 1628 qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
1629 GFP_KERNEL);
1630 if (!qp->rqe_wr_id) {
1631 DP_ERR(dev,
1632 "create qp: failed RQ shadow memory allocation\n");
1633 kfree(qp->wqe_wr_id);
1634 return -ENOMEM;
1635 }
1636
 1637 qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
 1638
1639 n_sq_entries = attrs->cap.max_send_wr;
1640 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1641 n_sq_entries = max_t(u32, n_sq_entries, 1);
1642 n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
 1643
1644 n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1645
1646 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1647 rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
1648 n_sq_elems, n_rq_elems);
1649 else
1650 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
1651 n_sq_elems, n_rq_elems);
1652 if (rc)
1653 qedr_cleanup_kernel(dev, qp);
1654
1655 return rc;
1656}
1657
1658struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1659 struct ib_qp_init_attr *attrs,
1660 struct ib_udata *udata)
1661{
1662 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
 1663 struct qedr_pd *pd = get_qedr_pd(ibpd);
 1664 struct qedr_qp *qp;
 1665 struct ib_qp *ibqp;
1666 int rc = 0;
1667
1668 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1669 udata ? "user library" : "kernel", pd);
1670
1671 rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1672 if (rc)
1673 return ERR_PTR(rc);
1674
1675 if (attrs->srq)
1676 return ERR_PTR(-EINVAL);
1677
 1678 DP_DEBUG(dev, QEDR_MSG_QP,
1679 "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1680 udata ? "user library" : "kernel", attrs->event_handler, pd,
1681 get_qedr_cq(attrs->send_cq),
1682 get_qedr_cq(attrs->send_cq)->icid,
1683 get_qedr_cq(attrs->recv_cq),
1684 get_qedr_cq(attrs->recv_cq)->icid);
1685
1686 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1687 if (!qp) {
1688 DP_ERR(dev, "create qp: failed allocating memory\n");
1689 return ERR_PTR(-ENOMEM);
1690 }
1691
1692 qedr_set_common_qp_params(dev, qp, pd, attrs);
 1693
 1694 if (attrs->qp_type == IB_QPT_GSI) {
1695 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
1696 if (IS_ERR(ibqp))
1697 kfree(qp);
1698 return ibqp;
1699 }
1700
1701 if (udata)
1702 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
1703 else
1704 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
 1705
1706 if (rc)
1707 goto err;
 1708
1709 qp->ibqp.qp_num = qp->qp_id;
1710
1711 rc = qedr_idr_add(dev, qp, qp->qp_id);
1712 if (rc)
1713 goto err;
1714
1715 return &qp->ibqp;
1716
 1717err:
1718 kfree(qp);
1719
1720 return ERR_PTR(-EFAULT);
1721}
1722
 1723static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
1724{
1725 switch (qp_state) {
1726 case QED_ROCE_QP_STATE_RESET:
1727 return IB_QPS_RESET;
1728 case QED_ROCE_QP_STATE_INIT:
1729 return IB_QPS_INIT;
1730 case QED_ROCE_QP_STATE_RTR:
1731 return IB_QPS_RTR;
1732 case QED_ROCE_QP_STATE_RTS:
1733 return IB_QPS_RTS;
1734 case QED_ROCE_QP_STATE_SQD:
1735 return IB_QPS_SQD;
1736 case QED_ROCE_QP_STATE_ERR:
1737 return IB_QPS_ERR;
1738 case QED_ROCE_QP_STATE_SQE:
1739 return IB_QPS_SQE;
1740 }
1741 return IB_QPS_ERR;
1742}
1743
1744static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1745 enum ib_qp_state qp_state)
1746{
1747 switch (qp_state) {
1748 case IB_QPS_RESET:
1749 return QED_ROCE_QP_STATE_RESET;
1750 case IB_QPS_INIT:
1751 return QED_ROCE_QP_STATE_INIT;
1752 case IB_QPS_RTR:
1753 return QED_ROCE_QP_STATE_RTR;
1754 case IB_QPS_RTS:
1755 return QED_ROCE_QP_STATE_RTS;
1756 case IB_QPS_SQD:
1757 return QED_ROCE_QP_STATE_SQD;
1758 case IB_QPS_ERR:
1759 return QED_ROCE_QP_STATE_ERR;
1760 default:
1761 return QED_ROCE_QP_STATE_ERR;
1762 }
1763}
1764
1765static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1766{
1767 qed_chain_reset(&qph->pbl);
1768 qph->prod = 0;
1769 qph->cons = 0;
1770 qph->wqe_cons = 0;
1771 qph->db_data.data.value = cpu_to_le16(0);
1772}
1773
1774static int qedr_update_qp_state(struct qedr_dev *dev,
1775 struct qedr_qp *qp,
 1776 enum qed_roce_qp_state cur_state,
1777 enum qed_roce_qp_state new_state)
1778{
1779 int status = 0;
1780
 1781 if (new_state == cur_state)
 1782 return 0;
 1783
 1784 switch (cur_state) {
1785 case QED_ROCE_QP_STATE_RESET:
1786 switch (new_state) {
1787 case QED_ROCE_QP_STATE_INIT:
1788 qp->prev_wqe_size = 0;
1789 qedr_reset_qp_hwq_info(&qp->sq);
1790 qedr_reset_qp_hwq_info(&qp->rq);
1791 break;
1792 default:
1793 status = -EINVAL;
1794 break;
1795 };
1796 break;
1797 case QED_ROCE_QP_STATE_INIT:
1798 switch (new_state) {
1799 case QED_ROCE_QP_STATE_RTR:
1800 /* Update doorbell (in case post_recv was
1801 * done before move to RTR)
1802 */
1803
1804 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1805 writel(qp->rq.db_data.raw, qp->rq.db);
1806 /* Make sure write takes effect */
1807 mmiowb();
1808 }
1809 break;
1810 case QED_ROCE_QP_STATE_ERR:
1811 break;
1812 default:
1813 /* Invalid state change. */
1814 status = -EINVAL;
1815 break;
1816 };
1817 break;
1818 case QED_ROCE_QP_STATE_RTR:
1819 /* RTR->XXX */
1820 switch (new_state) {
1821 case QED_ROCE_QP_STATE_RTS:
1822 break;
1823 case QED_ROCE_QP_STATE_ERR:
1824 break;
1825 default:
1826 /* Invalid state change. */
1827 status = -EINVAL;
1828 break;
1829 };
1830 break;
1831 case QED_ROCE_QP_STATE_RTS:
1832 /* RTS->XXX */
1833 switch (new_state) {
1834 case QED_ROCE_QP_STATE_SQD:
1835 break;
1836 case QED_ROCE_QP_STATE_ERR:
1837 break;
1838 default:
1839 /* Invalid state change. */
1840 status = -EINVAL;
1841 break;
1842 };
1843 break;
1844 case QED_ROCE_QP_STATE_SQD:
1845 /* SQD->XXX */
1846 switch (new_state) {
1847 case QED_ROCE_QP_STATE_RTS:
1848 case QED_ROCE_QP_STATE_ERR:
1849 break;
1850 default:
1851 /* Invalid state change. */
1852 status = -EINVAL;
1853 break;
1854 };
1855 break;
1856 case QED_ROCE_QP_STATE_ERR:
1857 /* ERR->XXX */
1858 switch (new_state) {
1859 case QED_ROCE_QP_STATE_RESET:
933e6dca
RA
1860 if ((qp->rq.prod != qp->rq.cons) ||
1861 (qp->sq.prod != qp->sq.cons)) {
1862 DP_NOTICE(dev,
1863 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1864 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1865 qp->sq.cons);
1866 status = -EINVAL;
1867 }
cecbcddf
RA
1868 break;
1869 default:
1870 status = -EINVAL;
1871 break;
1872 };
1873 break;
1874 default:
1875 status = -EINVAL;
1876 break;
1877 };
1878
1879 return status;
1880}
1881
1882int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1883 int attr_mask, struct ib_udata *udata)
1884{
1885 struct qedr_qp *qp = get_qedr_qp(ibqp);
1886 struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1887 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
d8966fcd 1888 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
cecbcddf 1889 enum ib_qp_state old_qp_state, new_qp_state;
caf61b1b 1890 enum qed_roce_qp_state cur_state;
cecbcddf
RA
1891 int rc = 0;
1892
1893 DP_DEBUG(dev, QEDR_MSG_QP,
1894 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
1895 attr->qp_state);
1896
1897 old_qp_state = qedr_get_ibqp_state(qp->state);
1898 if (attr_mask & IB_QP_STATE)
1899 new_qp_state = attr->qp_state;
1900 else
1901 new_qp_state = old_qp_state;
1902
f5b1b177
KM
1903 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1904 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
1905 ibqp->qp_type, attr_mask,
1906 IB_LINK_LAYER_ETHERNET)) {
1907 DP_ERR(dev,
1908 "modify qp: invalid attribute mask=0x%x specified for\n"
1909 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1910 attr_mask, qp->qp_id, ibqp->qp_type,
1911 old_qp_state, new_qp_state);
1912 rc = -EINVAL;
1913 goto err;
1914 }
cecbcddf
RA
1915 }
1916
1917 /* Translate the masks... */
1918 if (attr_mask & IB_QP_STATE) {
1919 SET_FIELD(qp_params.modify_flags,
1920 QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1921 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1922 }
1923
1924 if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1925 qp_params.sqd_async = true;
1926
1927 if (attr_mask & IB_QP_PKEY_INDEX) {
1928 SET_FIELD(qp_params.modify_flags,
1929 QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1930 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1931 rc = -EINVAL;
1932 goto err;
1933 }
1934
1935 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
1936 }
1937
1938 if (attr_mask & IB_QP_QKEY)
1939 qp->qkey = attr->qkey;
1940
1941 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1942 SET_FIELD(qp_params.modify_flags,
1943 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
1944 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
1945 IB_ACCESS_REMOTE_READ;
1946 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
1947 IB_ACCESS_REMOTE_WRITE;
1948 qp_params.incoming_atomic_en = attr->qp_access_flags &
1949 IB_ACCESS_REMOTE_ATOMIC;
1950 }
1951
1952 if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
1953 if (attr_mask & IB_QP_PATH_MTU) {
1954 if (attr->path_mtu < IB_MTU_256 ||
1955 attr->path_mtu > IB_MTU_4096) {
1956 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
1957 rc = -EINVAL;
1958 goto err;
1959 }
1960 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
1961 ib_mtu_enum_to_int(iboe_get_mtu
1962 (dev->ndev->mtu)));
1963 }
1964
1965 if (!qp->mtu) {
1966 qp->mtu =
1967 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1968 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
1969 }
1970
1971 SET_FIELD(qp_params.modify_flags,
1972 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
1973
d8966fcd
DC
1974 qp_params.traffic_class_tos = grh->traffic_class;
1975 qp_params.flow_label = grh->flow_label;
1976 qp_params.hop_limit_ttl = grh->hop_limit;
cecbcddf 1977
d8966fcd 1978 qp->sgid_idx = grh->sgid_index;
cecbcddf
RA
1979
1980 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
1981 if (rc) {
1982 DP_ERR(dev,
1983 "modify qp: problems with GID index %d (rc=%d)\n",
d8966fcd 1984 grh->sgid_index, rc);
cecbcddf
RA
1985 return rc;
1986 }
1987
1988 rc = qedr_get_dmac(dev, &attr->ah_attr,
1989 qp_params.remote_mac_addr);
1990 if (rc)
1991 return rc;
1992
1993 qp_params.use_local_mac = true;
1994 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
1995
1996 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
1997 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
1998 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
1999 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2000 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2001 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2002 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2003 qp_params.remote_mac_addr);
cecbcddf
RA
2004
2005 qp_params.mtu = qp->mtu;
2006 qp_params.lb_indication = false;
2007 }
2008
2009 if (!qp_params.mtu) {
2010 /* Stay with current MTU */
2011 if (qp->mtu)
2012 qp_params.mtu = qp->mtu;
2013 else
2014 qp_params.mtu =
2015 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2016 }
2017
2018 if (attr_mask & IB_QP_TIMEOUT) {
2019 SET_FIELD(qp_params.modify_flags,
2020 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2021
c3594f22
KM
2022 /* The received timeout value is an exponent used like this:
2023 * "12.7.34 LOCAL ACK TIMEOUT
2024 * Value representing the transport (ACK) timeout for use by
2025 * the remote, expressed as: 4.096 * 2^timeout [usec]"
2026 * The FW expects timeout in msec so we need to divide the usec
2027 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
2028 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2029 * A value of zero means infinite. We use 'max_t' to make
2030 * sure that sub-1-msec values will be configured as 1 msec.
2031 */
2032 if (attr->timeout)
2033 qp_params.ack_timeout =
2034 1 << max_t(int, attr->timeout - 8, 0);
2035 else
cecbcddf 2036 qp_params.ack_timeout = 0;
cecbcddf 2037 }
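
The exponent-to-millisecond conversion above can be checked in isolation. Below is a minimal userspace sketch (not part of the driver; names are made up for illustration) that compares the exact IB formula 4.096 usec * 2^timeout with the 2^(timeout - 8) value the code configures:

#include <stdio.h>

/* Mirror of the computation above: 1 << max(timeout - 8, 0), with
 * timeout == 0 meaning "infinite" and therefore left as 0.
 */
static unsigned int approx_ack_timeout_msec(unsigned int timeout)
{
	if (!timeout)
		return 0;
	return 1u << (timeout > 8 ? timeout - 8 : 0);
}

int main(void)
{
	unsigned int t;

	for (t = 1; t <= 20; t++) {
		double exact_msec = 4.096 * (double)(1u << t) / 1000.0;

		printf("timeout=%2u exact=%9.3f msec approx=%u msec\n",
		       t, exact_msec, approx_ack_timeout_msec(t));
	}
	return 0;
}

Note how every timeout value of 8 or below collapses to 1 msec, which is exactly what the max_t() clamp in the driver is for.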
c3594f22 2038
cecbcddf
RA
2039 if (attr_mask & IB_QP_RETRY_CNT) {
2040 SET_FIELD(qp_params.modify_flags,
2041 QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2042 qp_params.retry_cnt = attr->retry_cnt;
2043 }
2044
2045 if (attr_mask & IB_QP_RNR_RETRY) {
2046 SET_FIELD(qp_params.modify_flags,
2047 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2048 qp_params.rnr_retry_cnt = attr->rnr_retry;
2049 }
2050
2051 if (attr_mask & IB_QP_RQ_PSN) {
2052 SET_FIELD(qp_params.modify_flags,
2053 QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2054 qp_params.rq_psn = attr->rq_psn;
2055 qp->rq_psn = attr->rq_psn;
2056 }
2057
2058 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2059 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2060 rc = -EINVAL;
2061 DP_ERR(dev,
2062 "unsupported max_rd_atomic=%d, supported=%d\n",
2063 attr->max_rd_atomic,
2064 dev->attr.max_qp_req_rd_atomic_resc);
2065 goto err;
2066 }
2067
2068 SET_FIELD(qp_params.modify_flags,
2069 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2070 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2071 }
2072
2073 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2074 SET_FIELD(qp_params.modify_flags,
2075 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2076 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2077 }
2078
2079 if (attr_mask & IB_QP_SQ_PSN) {
2080 SET_FIELD(qp_params.modify_flags,
2081 QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2082 qp_params.sq_psn = attr->sq_psn;
2083 qp->sq_psn = attr->sq_psn;
2084 }
2085
2086 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2087 if (attr->max_dest_rd_atomic >
2088 dev->attr.max_qp_resp_rd_atomic_resc) {
2089 DP_ERR(dev,
2090 "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2091 attr->max_dest_rd_atomic,
2092 dev->attr.max_qp_resp_rd_atomic_resc);
2093
2094 rc = -EINVAL;
2095 goto err;
2096 }
2097
2098 SET_FIELD(qp_params.modify_flags,
2099 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2100 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2101 }
2102
2103 if (attr_mask & IB_QP_DEST_QPN) {
2104 SET_FIELD(qp_params.modify_flags,
2105 QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2106
2107 qp_params.dest_qp = attr->dest_qp_num;
2108 qp->dest_qp_num = attr->dest_qp_num;
2109 }
2110
caf61b1b
KM
2111 cur_state = qp->state;
2112
2113 /* Update the QP state before the actual ramrod to prevent a race with
2114 * fast path. Modifying the QP state to error will cause the device to
2115 * flush the CQEs, and while polling, the flushed CQEs will be considered
2116 * a potential issue if the QP isn't in the error state.
2117 */
2118 if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
2119 !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
2120 qp->state = QED_ROCE_QP_STATE_ERR;
2121
cecbcddf
RA
2122 if (qp->qp_type != IB_QPT_GSI)
2123 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2124 qp->qed_qp, &qp_params);
2125
2126 if (attr_mask & IB_QP_STATE) {
2127 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
caf61b1b
KM
2128 rc = qedr_update_qp_state(dev, qp, cur_state,
2129 qp_params.new_state);
cecbcddf
RA
2130 qp->state = qp_params.new_state;
2131 }
2132
2133err:
2134 return rc;
2135}
2136
2137static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2138{
2139 int ib_qp_acc_flags = 0;
2140
2141 if (params->incoming_rdma_write_en)
2142 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2143 if (params->incoming_rdma_read_en)
2144 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2145 if (params->incoming_atomic_en)
2146 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2147 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2148 return ib_qp_acc_flags;
2149}
2150
2151int qedr_query_qp(struct ib_qp *ibqp,
2152 struct ib_qp_attr *qp_attr,
2153 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2154{
2155 struct qed_rdma_query_qp_out_params params;
2156 struct qedr_qp *qp = get_qedr_qp(ibqp);
2157 struct qedr_dev *dev = qp->dev;
2158 int rc = 0;
2159
2160 memset(&params, 0, sizeof(params));
2161
2162 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2163 if (rc)
2164 goto err;
2165
2166 memset(qp_attr, 0, sizeof(*qp_attr));
2167 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2168
2169 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2170 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
097b6159 2171 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
cecbcddf
RA
2172 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2173 qp_attr->rq_psn = params.rq_psn;
2174 qp_attr->sq_psn = params.sq_psn;
2175 qp_attr->dest_qp_num = params.dest_qp;
2176
2177 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2178
2179 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2180 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2181 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2182 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
59e8970b 2183 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
cecbcddf
RA
2184 qp_init_attr->cap = qp_attr->cap;
2185
44c58487 2186 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
d8966fcd
DC
2187 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2188 params.flow_label, qp->sgid_idx,
2189 params.hop_limit_ttl, params.traffic_class_tos);
2190 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2191 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2192 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
cecbcddf
RA
2193 qp_attr->timeout = params.timeout;
2194 qp_attr->rnr_retry = params.rnr_retry;
2195 qp_attr->retry_cnt = params.retry_cnt;
2196 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2197 qp_attr->pkey_index = params.pkey_index;
2198 qp_attr->port_num = 1;
d8966fcd
DC
2199 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2200 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
cecbcddf
RA
2201 qp_attr->alt_pkey_index = 0;
2202 qp_attr->alt_port_num = 0;
2203 qp_attr->alt_timeout = 0;
2204 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2205
2206 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2207 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2208 qp_attr->max_rd_atomic = params.max_rd_atomic;
2209 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2210
2211 DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2212 qp_attr->cap.max_inline_data);
2213
2214err:
2215 return rc;
2216}
2217
0089985e 2218static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
df158561
AR
2219{
2220 int rc = 0;
2221
2222 if (qp->qp_type != IB_QPT_GSI) {
2223 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2224 if (rc)
2225 return rc;
2226 }
2227
2228 if (qp->ibqp.uobject && qp->ibqp.uobject->context)
2229 qedr_cleanup_user(dev, qp);
2230 else
2231 qedr_cleanup_kernel(dev, qp);
2232
2233 return 0;
2234}
2235
cecbcddf
RA
2236int qedr_destroy_qp(struct ib_qp *ibqp)
2237{
2238 struct qedr_qp *qp = get_qedr_qp(ibqp);
2239 struct qedr_dev *dev = qp->dev;
2240 struct ib_qp_attr attr;
2241 int attr_mask = 0;
2242 int rc = 0;
2243
2244 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2245 qp, qp->qp_type);
2246
f5b1b177
KM
2247 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2248 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2249 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2250 (qp->state != QED_ROCE_QP_STATE_INIT)) {
b4c2cc48 2251
f5b1b177
KM
2252 attr.qp_state = IB_QPS_ERR;
2253 attr_mask |= IB_QP_STATE;
cecbcddf 2254
f5b1b177
KM
2255 /* Change the QP state to ERROR */
2256 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2257 }
e411e058
KM
2258 } else {
2259 /* Wait for the connect/accept to complete */
2260 if (qp->ep) {
2261 int wait_count = 1;
2262
2263 while (qp->ep->during_connect) {
2264 DP_DEBUG(dev, QEDR_MSG_QP,
2265 "Still in during connect/accept\n");
2266
2267 msleep(100);
2268 if (wait_count++ > 200) {
2269 DP_NOTICE(dev,
2270 "during connect timeout\n");
2271 break;
2272 }
2273 }
2274 }
cecbcddf
RA
2275 }
2276
df158561 2277 if (qp->qp_type == IB_QPT_GSI)
04886779 2278 qedr_destroy_gsi_qp(dev);
cecbcddf 2279
df158561 2280 qedr_free_qp_resources(dev, qp);
cecbcddf 2281
de0089e6
KM
2282 if (atomic_dec_and_test(&qp->refcnt)) {
2283 qedr_idr_remove(dev, qp->qp_id);
2284 kfree(qp);
2285 }
cecbcddf
RA
2286 return rc;
2287}
e0290cce 2288
90898850 2289struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
477864c8 2290 struct ib_udata *udata)
04886779
RA
2291{
2292 struct qedr_ah *ah;
2293
2294 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2295 if (!ah)
2296 return ERR_PTR(-ENOMEM);
2297
d97099fe 2298 rdma_copy_ah_attr(&ah->attr, attr);
04886779
RA
2299
2300 return &ah->ibah;
2301}
2302
2303int qedr_destroy_ah(struct ib_ah *ibah)
2304{
2305 struct qedr_ah *ah = get_qedr_ah(ibah);
2306
d97099fe 2307 rdma_destroy_ah_attr(&ah->attr);
04886779
RA
2308 kfree(ah);
2309 return 0;
2310}
2311
e0290cce
RA
2312static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2313{
2314 struct qedr_pbl *pbl, *tmp;
2315
2316 if (info->pbl_table)
2317 list_add_tail(&info->pbl_table->list_entry,
2318 &info->free_pbl_list);
2319
2320 if (!list_empty(&info->inuse_pbl_list))
2321 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2322
2323 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2324 list_del(&pbl->list_entry);
2325 qedr_free_pbl(dev, &info->pbl_info, pbl);
2326 }
2327}
2328
2329static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2330 size_t page_list_len, bool two_layered)
2331{
2332 struct qedr_pbl *tmp;
2333 int rc;
2334
2335 INIT_LIST_HEAD(&info->free_pbl_list);
2336 INIT_LIST_HEAD(&info->inuse_pbl_list);
2337
2338 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2339 page_list_len, two_layered);
2340 if (rc)
2341 goto done;
2342
2343 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
4cd33aaf
CJ
2344 if (IS_ERR(info->pbl_table)) {
2345 rc = PTR_ERR(info->pbl_table);
e0290cce
RA
2346 goto done;
2347 }
2348
2349 DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2350 &info->pbl_table->pa);
2351
2352 /* In the usual case we use 2 PBLs, so we add one to the free
2353 * list and allocate another one
2354 */
2355 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
4cd33aaf 2356 if (IS_ERR(tmp)) {
e0290cce
RA
2357 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2358 goto done;
2359 }
2360
2361 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2362
2363 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2364
2365done:
2366 if (rc)
2367 free_mr_info(dev, info);
2368
2369 return rc;
2370}
2371
2372struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2373 u64 usr_addr, int acc, struct ib_udata *udata)
2374{
2375 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2376 struct qedr_mr *mr;
2377 struct qedr_pd *pd;
2378 int rc = -ENOMEM;
2379
2380 pd = get_qedr_pd(ibpd);
2381 DP_DEBUG(dev, QEDR_MSG_MR,
2382 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2383 pd->pd_id, start, len, usr_addr, acc);
2384
2385 if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2386 return ERR_PTR(-EINVAL);
2387
2388 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2389 if (!mr)
2390 return ERR_PTR(rc);
2391
2392 mr->type = QEDR_MR_USER;
2393
2394 mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2395 if (IS_ERR(mr->umem)) {
2396 rc = -EFAULT;
2397 goto err0;
2398 }
2399
2400 rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2401 if (rc)
2402 goto err1;
2403
2404 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
e57bb6be 2405 &mr->info.pbl_info, mr->umem->page_shift);
e0290cce
RA
2406
2407 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2408 if (rc) {
2409 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2410 goto err1;
2411 }
2412
2413 /* Index only, 18 bit long, lkey = itid << 8 | key */
2414 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2415 mr->hw_mr.key = 0;
2416 mr->hw_mr.pd = pd->pd_id;
2417 mr->hw_mr.local_read = 1;
2418 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2419 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2420 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2421 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2422 mr->hw_mr.mw_bind = false;
2423 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2424 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2425 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3e7e1193 2426 mr->hw_mr.page_size_log = mr->umem->page_shift;
e0290cce
RA
2427 mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2428 mr->hw_mr.length = len;
2429 mr->hw_mr.vaddr = usr_addr;
2430 mr->hw_mr.zbva = false;
2431 mr->hw_mr.phy_mr = false;
2432 mr->hw_mr.dma_mr = false;
2433
2434 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2435 if (rc) {
2436 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2437 goto err2;
2438 }
2439
2440 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2441 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2442 mr->hw_mr.remote_atomic)
2443 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2444
2445 DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2446 mr->ibmr.lkey);
2447 return &mr->ibmr;
2448
2449err2:
2450 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2451err1:
2452 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2453err0:
2454 kfree(mr);
2455 return ERR_PTR(rc);
2456}
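
The "Index only, 18 bit long, lkey = itid << 8 | key" layout used here (and in the FRMR and DMA MR paths below) is a simple bit composition. A standalone sketch, illustrative only, with the 18-bit itid width taken from that comment and key = 0 as the driver sets it:

#include <stdio.h>
#include <stdint.h>

/* Compose an lkey the way the comment describes: the TID occupies the
 * upper bits and an 8-bit key occupies the low byte.
 */
static uint32_t compose_lkey(uint32_t itid, uint8_t key)
{
	return (itid << 8) | key;
}

int main(void)
{
	uint32_t itid = 0x1234;	/* example TID, up to 18 bits per the comment */
	uint8_t key = 0;	/* the driver registers with key = 0 */
	uint32_t lkey = compose_lkey(itid, key);

	printf("itid=0x%x key=0x%x -> lkey=0x%x (itid back: 0x%x)\n",
	       itid, key, lkey, lkey >> 8);
	return 0;
}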
2457
2458int qedr_dereg_mr(struct ib_mr *ib_mr)
2459{
2460 struct qedr_mr *mr = get_qedr_mr(ib_mr);
2461 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2462 int rc = 0;
2463
2464 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2465 if (rc)
2466 return rc;
2467
2468 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2469
2470 if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2471 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2472
2473 /* it could be user registered memory. */
2474 if (mr->umem)
2475 ib_umem_release(mr->umem);
2476
2477 kfree(mr);
2478
2479 return rc;
2480}
2481
27a4b1a6
RA
2482static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2483 int max_page_list_len)
e0290cce
RA
2484{
2485 struct qedr_pd *pd = get_qedr_pd(ibpd);
2486 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2487 struct qedr_mr *mr;
2488 int rc = -ENOMEM;
2489
2490 DP_DEBUG(dev, QEDR_MSG_MR,
2491 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2492 max_page_list_len);
2493
2494 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2495 if (!mr)
2496 return ERR_PTR(rc);
2497
2498 mr->dev = dev;
2499 mr->type = QEDR_MR_FRMR;
2500
2501 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2502 if (rc)
2503 goto err0;
2504
2505 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2506 if (rc) {
2507 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2508 goto err0;
2509 }
2510
2511 /* Index only, 18 bit long, lkey = itid << 8 | key */
2512 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2513 mr->hw_mr.key = 0;
2514 mr->hw_mr.pd = pd->pd_id;
2515 mr->hw_mr.local_read = 1;
2516 mr->hw_mr.local_write = 0;
2517 mr->hw_mr.remote_read = 0;
2518 mr->hw_mr.remote_write = 0;
2519 mr->hw_mr.remote_atomic = 0;
2520 mr->hw_mr.mw_bind = false;
2521 mr->hw_mr.pbl_ptr = 0;
2522 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2523 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2524 mr->hw_mr.fbo = 0;
2525 mr->hw_mr.length = 0;
2526 mr->hw_mr.vaddr = 0;
2527 mr->hw_mr.zbva = false;
2528 mr->hw_mr.phy_mr = true;
2529 mr->hw_mr.dma_mr = false;
2530
2531 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2532 if (rc) {
2533 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2534 goto err1;
2535 }
2536
2537 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2538 mr->ibmr.rkey = mr->ibmr.lkey;
2539
2540 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2541 return mr;
2542
2543err1:
2544 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2545err0:
2546 kfree(mr);
2547 return ERR_PTR(rc);
2548}
2549
2550struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2551 enum ib_mr_type mr_type, u32 max_num_sg)
2552{
e0290cce
RA
2553 struct qedr_mr *mr;
2554
2555 if (mr_type != IB_MR_TYPE_MEM_REG)
2556 return ERR_PTR(-EINVAL);
2557
2558 mr = __qedr_alloc_mr(ibpd, max_num_sg);
2559
2560 if (IS_ERR(mr))
2561 return ERR_PTR(-EINVAL);
2562
e0290cce
RA
2563 return &mr->ibmr;
2564}
2565
2566static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2567{
2568 struct qedr_mr *mr = get_qedr_mr(ibmr);
2569 struct qedr_pbl *pbl_table;
2570 struct regpair *pbe;
2571 u32 pbes_in_page;
2572
2573 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
ffab8c89 2574 DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
e0290cce
RA
2575 return -ENOMEM;
2576 }
2577
2578 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2579 mr->npages, addr);
2580
2581 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2582 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2583 pbe = (struct regpair *)pbl_table->va;
2584 pbe += mr->npages % pbes_in_page;
2585 pbe->lo = cpu_to_le32((u32)addr);
2586 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2587
2588 mr->npages++;
2589
2590 return 0;
2591}
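
The page-to-PBE addressing in qedr_set_page() (which PBL page a given MR page lands in, and at which offset inside it) is plain integer arithmetic. A standalone sketch of that indexing; the 4 KB pbl_size is only an illustrative assumption, not the driver's configured value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative values: a 4 KB PBL page holds 4096 / 8 = 512 PBEs. */
	uint32_t pbl_size = 4096;
	uint32_t pbes_in_page = pbl_size / sizeof(uint64_t);
	uint32_t npages;

	for (npages = 0; npages < 1200; npages += 511) {
		uint32_t table_idx = npages / pbes_in_page; /* which PBL page */
		uint32_t pbe_idx = npages % pbes_in_page;   /* offset inside it */

		printf("page %4u -> pbl_table[%u], pbe[%u]\n",
		       npages, table_idx, pbe_idx);
	}
	return 0;
}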
2592
2593static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2594{
2595 int work = info->completed - info->completed_handled - 1;
2596
2597 DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2598 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2599 struct qedr_pbl *pbl;
2600
2601 /* Free all the page lists that can be freed
2602 * (all the ones that were invalidated), under the assumption
2603 * that if an FMR completed successfully, then any invalidate
2604 * operation issued before it has also completed.
2605 */
2606 pbl = list_first_entry(&info->inuse_pbl_list,
2607 struct qedr_pbl, list_entry);
aafec388 2608 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
e0290cce
RA
2609 info->completed_handled++;
2610 }
2611}
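
The "work = completed - completed_handled - 1" bookkeeping above always leaves the most recent completion unprocessed, so the PBL of the latest FMR stays on the in-use list. A toy walk-through of the counter arithmetic (illustrative only; it assumes the in-use list always has enough PBLs to move):

#include <stdio.h>

int main(void)
{
	int completed = 0, completed_handled = 0;
	int round;

	for (round = 1; round <= 3; round++) {
		/* pretend two more FMR work requests completed this round */
		completed += 2;

		int work = completed - completed_handled - 1;

		printf("round %d: completed=%d handled=%d -> free %d PBL(s)\n",
		       round, completed, completed_handled, work);
		completed_handled += work;
	}
	return 0;
}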
2612
2613int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2614 int sg_nents, unsigned int *sg_offset)
2615{
2616 struct qedr_mr *mr = get_qedr_mr(ibmr);
2617
2618 mr->npages = 0;
2619
2620 handle_completed_mrs(mr->dev, &mr->info);
2621 return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2622}
2623
2624struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2625{
2626 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2627 struct qedr_pd *pd = get_qedr_pd(ibpd);
2628 struct qedr_mr *mr;
2629 int rc;
2630
2631 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2632 if (!mr)
2633 return ERR_PTR(-ENOMEM);
2634
2635 mr->type = QEDR_MR_DMA;
2636
2637 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2638 if (rc) {
2639 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2640 goto err1;
2641 }
2642
2643 /* index only, 18 bit long, lkey = itid << 8 | key */
2644 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2645 mr->hw_mr.pd = pd->pd_id;
2646 mr->hw_mr.local_read = 1;
2647 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2648 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2649 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2650 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2651 mr->hw_mr.dma_mr = true;
2652
2653 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2654 if (rc) {
2655 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2656 goto err2;
2657 }
2658
2659 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2660 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2661 mr->hw_mr.remote_atomic)
2662 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2663
2664 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2665 return &mr->ibmr;
2666
2667err2:
2668 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2669err1:
2670 kfree(mr);
2671 return ERR_PTR(rc);
2672}
afa0e13b
RA
2673
2674static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2675{
2676 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2677}
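
The (prod + 1) % max_wr == cons test above is the classic "one slot left unused" ring-full check. A minimal standalone sketch (types and values are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

struct toy_wq {
	uint16_t prod;
	uint16_t cons;
	uint16_t max_wr;
};

/* Same predicate as qedr_wq_is_full(): full when advancing the producer
 * would make it collide with the consumer, so one slot is sacrificed.
 */
static bool toy_wq_is_full(const struct toy_wq *wq)
{
	return ((wq->prod + 1) % wq->max_wr) == wq->cons;
}

int main(void)
{
	struct toy_wq wq = { .prod = 0, .cons = 0, .max_wr = 4 };
	int posted = 0;

	while (!toy_wq_is_full(&wq)) {
		wq.prod = (wq.prod + 1) % wq.max_wr;
		posted++;
	}
	/* With max_wr == 4, only 3 WRs fit before the queue reports full. */
	printf("posted %d WRs before full\n", posted);
	return 0;
}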
2678
2679static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2680{
2681 int i, len = 0;
2682
2683 for (i = 0; i < num_sge; i++)
2684 len += sg_list[i].length;
2685
2686 return len;
2687}
2688
2689static void swap_wqe_data64(u64 *p)
2690{
2691 int i;
2692
2693 for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2694 *p = cpu_to_be64(cpu_to_le64(*p));
2695}
2696
2697static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2698 struct qedr_qp *qp, u8 *wqe_size,
2699 struct ib_send_wr *wr,
2700 struct ib_send_wr **bad_wr, u8 *bits,
2701 u8 bit)
2702{
2703 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2704 char *seg_prt, *wqe;
2705 int i, seg_siz;
2706
2707 if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2708 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2709 *bad_wr = wr;
2710 return 0;
2711 }
2712
2713 if (!data_size)
2714 return data_size;
2715
2716 *bits |= bit;
2717
2718 seg_prt = NULL;
2719 wqe = NULL;
2720 seg_siz = 0;
2721
2722 /* Copy data inline */
2723 for (i = 0; i < wr->num_sge; i++) {
2724 u32 len = wr->sg_list[i].length;
2725 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2726
2727 while (len > 0) {
2728 u32 cur;
2729
2730 /* New segment required */
2731 if (!seg_siz) {
2732 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2733 seg_prt = wqe;
2734 seg_siz = sizeof(struct rdma_sq_common_wqe);
2735 (*wqe_size)++;
2736 }
2737
2738 /* Calculate currently allowed length */
2739 cur = min_t(u32, len, seg_siz);
2740 memcpy(seg_prt, src, cur);
2741
2742 /* Update segment variables */
2743 seg_prt += cur;
2744 seg_siz -= cur;
2745
2746 /* Update sge variables */
2747 src += cur;
2748 len -= cur;
2749
2750 /* Swap fully-completed segments */
2751 if (!seg_siz)
2752 swap_wqe_data64((u64 *)wqe);
2753 }
2754 }
2755
2756 /* swap the last, not fully completed segment */
2757 if (seg_siz)
2758 swap_wqe_data64((u64 *)wqe);
2759
2760 return data_size;
2761}
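
The copy loop above chops the SGE payload into fixed-size WQE segments and byte-swaps each segment once it is full. The segmentation itself (how many chain elements a payload needs and how a source buffer straddles them) can be sketched in isolation; the 16-byte segment size below is only an assumption standing in for sizeof(struct rdma_sq_common_wqe):

#include <stdio.h>
#include <string.h>

#define TOY_SEG_SIZE 16	/* assumed WQE segment size, for illustration only */

/* Copy 'len' bytes from 'src' into back-to-back fixed-size segments and
 * return how many segments were consumed (the driver bumps *wqe_size the
 * same way each time it produces a new chain element).
 */
static int toy_copy_inline(const char *src, size_t len,
			   char segs[][TOY_SEG_SIZE], int max_segs)
{
	int used = 0;
	size_t seg_room = 0;
	char *dst = NULL;

	while (len) {
		size_t cur;

		if (!seg_room) {		/* new segment required */
			if (used == max_segs)
				return -1;
			dst = segs[used++];
			seg_room = TOY_SEG_SIZE;
		}
		cur = len < seg_room ? len : seg_room;
		memcpy(dst, src, cur);
		dst += cur;
		seg_room -= cur;
		src += cur;
		len -= cur;
	}
	return used;
}

int main(void)
{
	char segs[8][TOY_SEG_SIZE];
	const char payload[] = "0123456789abcdefghijklmnopqrstuv"; /* 33 bytes */
	int used = toy_copy_inline(payload, sizeof(payload), segs, 8);

	printf("%zu bytes -> %d segments of %d bytes\n",
	       sizeof(payload), used, TOY_SEG_SIZE);
	return 0;
}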
2762
2763#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
2764 do { \
2765 DMA_REGPAIR_LE(sge->addr, vaddr); \
2766 (sge)->length = cpu_to_le32(vlength); \
2767 (sge)->flags = cpu_to_le32(vflags); \
2768 } while (0)
2769
2770#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
2771 do { \
2772 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
2773 (hdr)->num_sges = num_sge; \
2774 } while (0)
2775
2776#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
2777 do { \
2778 DMA_REGPAIR_LE(sge->addr, vaddr); \
2779 (sge)->length = cpu_to_le32(vlength); \
2780 (sge)->l_key = cpu_to_le32(vlkey); \
2781 } while (0)
2782
2783static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
f696bf6d 2784 const struct ib_send_wr *wr)
afa0e13b
RA
2785{
2786 u32 data_size = 0;
2787 int i;
2788
2789 for (i = 0; i < wr->num_sge; i++) {
2790 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2791
2792 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2793 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2794 sge->length = cpu_to_le32(wr->sg_list[i].length);
2795 data_size += wr->sg_list[i].length;
2796 }
2797
2798 if (wqe_size)
2799 *wqe_size += wr->num_sge;
2800
2801 return data_size;
2802}
2803
2804static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2805 struct qedr_qp *qp,
2806 struct rdma_sq_rdma_wqe_1st *rwqe,
2807 struct rdma_sq_rdma_wqe_2nd *rwqe2,
2808 struct ib_send_wr *wr,
2809 struct ib_send_wr **bad_wr)
2810{
2811 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2812 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2813
8b0cabc6
AR
2814 if (wr->send_flags & IB_SEND_INLINE &&
2815 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2816 wr->opcode == IB_WR_RDMA_WRITE)) {
afa0e13b
RA
2817 u8 flags = 0;
2818
2819 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2820 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2821 bad_wr, &rwqe->flags, flags);
2822 }
2823
2824 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2825}
2826
2827static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2828 struct qedr_qp *qp,
2829 struct rdma_sq_send_wqe_1st *swqe,
2830 struct rdma_sq_send_wqe_2st *swqe2,
2831 struct ib_send_wr *wr,
2832 struct ib_send_wr **bad_wr)
2833{
2834 memset(swqe2, 0, sizeof(*swqe2));
2835 if (wr->send_flags & IB_SEND_INLINE) {
2836 u8 flags = 0;
2837
2838 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2839 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2840 bad_wr, &swqe->flags, flags);
2841 }
2842
2843 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2844}
2845
2846static int qedr_prepare_reg(struct qedr_qp *qp,
2847 struct rdma_sq_fmr_wqe_1st *fwqe1,
f696bf6d 2848 const struct ib_reg_wr *wr)
afa0e13b
RA
2849{
2850 struct qedr_mr *mr = get_qedr_mr(wr->mr);
2851 struct rdma_sq_fmr_wqe_2nd *fwqe2;
2852
2853 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2854 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2855 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2856 fwqe1->l_key = wr->key;
2857
08c4cf51
AR
2858 fwqe2->access_ctrl = 0;
2859
afa0e13b
RA
2860 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2861 !!(wr->access & IB_ACCESS_REMOTE_READ));
2862 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2863 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2864 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2865 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2866 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2867 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2868 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2869 fwqe2->fmr_ctrl = 0;
2870
2871 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2872 ilog2(mr->ibmr.page_size) - 12);
2873
2874 fwqe2->length_hi = 0;
2875 fwqe2->length_lo = mr->ibmr.length;
2876 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2877 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2878
2879 qp->wqe_wr_id[qp->sq.prod].mr = mr;
2880
2881 return 0;
2882}
2883
27a4b1a6 2884static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
afa0e13b
RA
2885{
2886 switch (opcode) {
2887 case IB_WR_RDMA_WRITE:
2888 case IB_WR_RDMA_WRITE_WITH_IMM:
2889 return IB_WC_RDMA_WRITE;
2890 case IB_WR_SEND_WITH_IMM:
2891 case IB_WR_SEND:
2892 case IB_WR_SEND_WITH_INV:
2893 return IB_WC_SEND;
2894 case IB_WR_RDMA_READ:
fb1a22be 2895 case IB_WR_RDMA_READ_WITH_INV:
afa0e13b
RA
2896 return IB_WC_RDMA_READ;
2897 case IB_WR_ATOMIC_CMP_AND_SWP:
2898 return IB_WC_COMP_SWAP;
2899 case IB_WR_ATOMIC_FETCH_AND_ADD:
2900 return IB_WC_FETCH_ADD;
2901 case IB_WR_REG_MR:
2902 return IB_WC_REG_MR;
2903 case IB_WR_LOCAL_INV:
2904 return IB_WC_LOCAL_INV;
2905 default:
2906 return IB_WC_SEND;
2907 }
2908}
2909
f696bf6d
BVA
2910static inline bool qedr_can_post_send(struct qedr_qp *qp,
2911 const struct ib_send_wr *wr)
afa0e13b
RA
2912{
2913 int wq_is_full, err_wr, pbl_is_full;
2914 struct qedr_dev *dev = qp->dev;
2915
2916 /* prevent SQ overflow and/or processing of a bad WR */
2917 err_wr = wr->num_sge > qp->sq.max_sges;
2918 wq_is_full = qedr_wq_is_full(&qp->sq);
2919 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2920 QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2921 if (wq_is_full || err_wr || pbl_is_full) {
2922 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2923 DP_ERR(dev,
2924 "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2925 qp);
2926 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2927 }
2928
2929 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2930 DP_ERR(dev,
2931 "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2932 qp);
2933 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2934 }
2935
2936 if (pbl_is_full &&
2937 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2938 DP_ERR(dev,
2939 "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2940 qp);
2941 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2942 }
2943 return false;
2944 }
2945 return true;
2946}
2947
27a4b1a6 2948static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
afa0e13b
RA
2949 struct ib_send_wr **bad_wr)
2950{
2951 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2952 struct qedr_qp *qp = get_qedr_qp(ibqp);
2953 struct rdma_sq_atomic_wqe_1st *awqe1;
2954 struct rdma_sq_atomic_wqe_2nd *awqe2;
2955 struct rdma_sq_atomic_wqe_3rd *awqe3;
2956 struct rdma_sq_send_wqe_2st *swqe2;
2957 struct rdma_sq_local_inv_wqe *iwqe;
2958 struct rdma_sq_rdma_wqe_2nd *rwqe2;
2959 struct rdma_sq_send_wqe_1st *swqe;
2960 struct rdma_sq_rdma_wqe_1st *rwqe;
2961 struct rdma_sq_fmr_wqe_1st *fwqe1;
2962 struct rdma_sq_common_wqe *wqe;
2963 u32 length;
2964 int rc = 0;
2965 bool comp;
2966
2967 if (!qedr_can_post_send(qp, wr)) {
2968 *bad_wr = wr;
2969 return -ENOMEM;
2970 }
2971
2972 wqe = qed_chain_produce(&qp->sq.pbl);
2973 qp->wqe_wr_id[qp->sq.prod].signaled =
2974 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
2975
2976 wqe->flags = 0;
2977 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
2978 !!(wr->send_flags & IB_SEND_SOLICITED));
2979 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
2980 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
2981 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
2982 !!(wr->send_flags & IB_SEND_FENCE));
2983 wqe->prev_wqe_size = qp->prev_wqe_size;
2984
2985 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
2986
2987 switch (wr->opcode) {
2988 case IB_WR_SEND_WITH_IMM:
551e1c67
KM
2989 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
2990 rc = -EINVAL;
2991 *bad_wr = wr;
2992 break;
2993 }
afa0e13b
RA
2994 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
2995 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2996 swqe->wqe_size = 2;
2997 swqe2 = qed_chain_produce(&qp->sq.pbl);
2998
7bed7ebc 2999 swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
afa0e13b
RA
3000 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3001 wr, bad_wr);
3002 swqe->length = cpu_to_le32(length);
3003 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3004 qp->prev_wqe_size = swqe->wqe_size;
3005 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3006 break;
3007 case IB_WR_SEND:
3008 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3009 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3010
3011 swqe->wqe_size = 2;
3012 swqe2 = qed_chain_produce(&qp->sq.pbl);
3013 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3014 wr, bad_wr);
3015 swqe->length = cpu_to_le32(length);
3016 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3017 qp->prev_wqe_size = swqe->wqe_size;
3018 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3019 break;
3020 case IB_WR_SEND_WITH_INV:
3021 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3022 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3023 swqe2 = qed_chain_produce(&qp->sq.pbl);
3024 swqe->wqe_size = 2;
3025 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3026 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3027 wr, bad_wr);
3028 swqe->length = cpu_to_le32(length);
3029 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3030 qp->prev_wqe_size = swqe->wqe_size;
3031 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3032 break;
3033
3034 case IB_WR_RDMA_WRITE_WITH_IMM:
551e1c67
KM
3035 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3036 rc = -EINVAL;
3037 *bad_wr = wr;
3038 break;
3039 }
afa0e13b
RA
3040 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3041 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3042
3043 rwqe->wqe_size = 2;
3044 rwqe->imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
3045 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3046 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3047 wr, bad_wr);
3048 rwqe->length = cpu_to_le32(length);
3049 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3050 qp->prev_wqe_size = rwqe->wqe_size;
3051 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3052 break;
3053 case IB_WR_RDMA_WRITE:
3054 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3055 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3056
3057 rwqe->wqe_size = 2;
3058 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3059 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3060 wr, bad_wr);
3061 rwqe->length = cpu_to_le32(length);
3062 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3063 qp->prev_wqe_size = rwqe->wqe_size;
3064 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3065 break;
3066 case IB_WR_RDMA_READ_WITH_INV:
fb1a22be 3067 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
1b8a708b 3068 /* fallthrough -- handled identically to RDMA READ */
afa0e13b
RA
3069
3070 case IB_WR_RDMA_READ:
3071 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3072 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3073
3074 rwqe->wqe_size = 2;
3075 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3076 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3077 wr, bad_wr);
3078 rwqe->length = cpu_to_le32(length);
3079 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3080 qp->prev_wqe_size = rwqe->wqe_size;
3081 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3082 break;
3083
3084 case IB_WR_ATOMIC_CMP_AND_SWP:
3085 case IB_WR_ATOMIC_FETCH_AND_ADD:
3086 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3087 awqe1->wqe_size = 4;
3088
3089 awqe2 = qed_chain_produce(&qp->sq.pbl);
3090 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3091 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3092
3093 awqe3 = qed_chain_produce(&qp->sq.pbl);
3094
3095 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3096 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3097 DMA_REGPAIR_LE(awqe3->swap_data,
3098 atomic_wr(wr)->compare_add);
3099 } else {
3100 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3101 DMA_REGPAIR_LE(awqe3->swap_data,
3102 atomic_wr(wr)->swap);
3103 DMA_REGPAIR_LE(awqe3->cmp_data,
3104 atomic_wr(wr)->compare_add);
3105 }
3106
3107 qedr_prepare_sq_sges(qp, NULL, wr);
3108
3109 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3110 qp->prev_wqe_size = awqe1->wqe_size;
3111 break;
3112
3113 case IB_WR_LOCAL_INV:
3114 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3115 iwqe->wqe_size = 1;
3116
3117 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3118 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3119 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3120 qp->prev_wqe_size = iwqe->wqe_size;
3121 break;
3122 case IB_WR_REG_MR:
3123 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3124 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3125 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3126 fwqe1->wqe_size = 2;
3127
3128 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3129 if (rc) {
3130 DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3131 *bad_wr = wr;
3132 break;
3133 }
3134
3135 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3136 qp->prev_wqe_size = fwqe1->wqe_size;
3137 break;
3138 default:
3139 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3140 rc = -EINVAL;
3141 *bad_wr = wr;
3142 break;
3143 }
3144
3145 if (*bad_wr) {
3146 u16 value;
3147
3148 /* Restore prod to its position before
3149 * this WR was processed
3150 */
3151 value = le16_to_cpu(qp->sq.db_data.data.value);
3152 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3153
3154 /* Restore prev_wqe_size */
3155 qp->prev_wqe_size = wqe->prev_wqe_size;
3156 rc = -EINVAL;
3157 DP_ERR(dev, "POST SEND FAILED\n");
3158 }
3159
3160 return rc;
3161}
3162
3163int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3164 struct ib_send_wr **bad_wr)
3165{
3166 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3167 struct qedr_qp *qp = get_qedr_qp(ibqp);
3168 unsigned long flags;
3169 int rc = 0;
3170
3171 *bad_wr = NULL;
3172
04886779
RA
3173 if (qp->qp_type == IB_QPT_GSI)
3174 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3175
afa0e13b
RA
3176 spin_lock_irqsave(&qp->q_lock, flags);
3177
f5b1b177
KM
3178 if (rdma_protocol_roce(&dev->ibdev, 1)) {
3179 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3180 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3181 (qp->state != QED_ROCE_QP_STATE_SQD)) {
3182 spin_unlock_irqrestore(&qp->q_lock, flags);
3183 *bad_wr = wr;
3184 DP_DEBUG(dev, QEDR_MSG_CQ,
3185 "QP in wrong state! QP icid=0x%x state %d\n",
3186 qp->icid, qp->state);
3187 return -EINVAL;
3188 }
afa0e13b
RA
3189 }
3190
afa0e13b
RA
3191 while (wr) {
3192 rc = __qedr_post_send(ibqp, wr, bad_wr);
3193 if (rc)
3194 break;
3195
3196 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3197
3198 qedr_inc_sw_prod(&qp->sq);
3199
3200 qp->sq.db_data.data.value++;
3201
3202 wr = wr->next;
3203 }
3204
3205 /* Trigger doorbell
3206 * If there was a failure in the first WR then it will be triggered in
3207 * vain. However, this is not harmful (as long as the producer value is
3208 * unchanged). For performance reasons we avoid checking for this
3209 * redundant doorbell.
09c4854f
KM
3210 *
3211 * qp->wqe_wr_id is accessed during qedr_poll_cq, as
3212 * soon as we give the doorbell, we could get a completion
3213 * for this wr, therefore we need to make sure that the
3214 * memory is updated before giving the doorbell.
3215 * During qedr_poll_cq, rmb is called before accessing the
3216 * cqe. This covers for the smp_rmb as well.
afa0e13b 3217 */
09c4854f 3218 smp_wmb();
afa0e13b
RA
3219 writel(qp->sq.db_data.raw, qp->sq.db);
3220
3221 /* Make sure write sticks */
3222 mmiowb();
3223
3224 spin_unlock_irqrestore(&qp->q_lock, flags);
3225
3226 return rc;
3227}
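
The smp_wmb() before the doorbell write above enforces a simple contract: the wqe_wr_id entry must be visible before the producer update that lets the completion path read it, and qedr_poll_cq() pairs this with an rmb() on the consumer side. A userspace analogue of that contract using C11 release/acquire fences, purely as an illustration (the kernel primitives are different):

#include <stdatomic.h>
#include <stdio.h>

static int wr_id;		/* payload written by the "post" side */
static atomic_int doorbell;	/* stands in for the producer/doorbell value */

static void toy_post(int id)
{
	wr_id = id;		/* publish the work request data... */
	/* ...then make it visible before the doorbell, like smp_wmb() */
	atomic_store_explicit(&doorbell, 1, memory_order_release);
}

static void toy_poll(void)
{
	/* pairs with the release store, like the rmb() in qedr_poll_cq() */
	if (atomic_load_explicit(&doorbell, memory_order_acquire))
		printf("completed wr_id=%d\n", wr_id);
}

int main(void)
{
	toy_post(42);
	toy_poll();
	return 0;
}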
3228
3229int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3230 struct ib_recv_wr **bad_wr)
3231{
3232 struct qedr_qp *qp = get_qedr_qp(ibqp);
3233 struct qedr_dev *dev = qp->dev;
3234 unsigned long flags;
3235 int status = 0;
3236
04886779
RA
3237 if (qp->qp_type == IB_QPT_GSI)
3238 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3239
afa0e13b
RA
3240 spin_lock_irqsave(&qp->q_lock, flags);
3241
922d9a40 3242 if (qp->state == QED_ROCE_QP_STATE_RESET) {
afa0e13b
RA
3243 spin_unlock_irqrestore(&qp->q_lock, flags);
3244 *bad_wr = wr;
3245 return -EINVAL;
3246 }
3247
3248 while (wr) {
3249 int i;
3250
3251 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3252 QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3253 wr->num_sge > qp->rq.max_sges) {
3254 DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3255 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3256 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3257 qp->rq.max_sges);
3258 status = -ENOMEM;
3259 *bad_wr = wr;
3260 break;
3261 }
3262 for (i = 0; i < wr->num_sge; i++) {
3263 u32 flags = 0;
3264 struct rdma_rq_sge *rqe =
3265 qed_chain_produce(&qp->rq.pbl);
3266
3267 /* First one must include the number
3268 * of SGEs in the list
3269 */
3270 if (!i)
3271 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3272 wr->num_sge);
3273
d52c89f1 3274 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
afa0e13b
RA
3275 wr->sg_list[i].lkey);
3276
3277 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3278 wr->sg_list[i].length, flags);
3279 }
3280
3281 /* Special case of no SGEs. FW requires between 1-4 SGEs...
3282 * In this case we need to post 1 SGE with length zero. This is
3283 * because RDMA write with immediate consumes an RQ entry.
3284 */
3285 if (!wr->num_sge) {
3286 u32 flags = 0;
3287 struct rdma_rq_sge *rqe =
3288 qed_chain_produce(&qp->rq.pbl);
3289
3290 /* First one must include the number
3291 * of SGEs in the list
3292 */
d52c89f1 3293 SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
afa0e13b
RA
3294 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3295
3296 RQ_SGE_SET(rqe, 0, 0, flags);
3297 i = 1;
3298 }
3299
3300 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3301 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3302
3303 qedr_inc_sw_prod(&qp->rq);
3304
09c4854f
KM
3305 /* qp->rqe_wr_id is accessed during qedr_poll_cq, as
3306 * soon as we give the doorbell, we could get a completion
3307 * for this wr, therefore we need to make sure that the
3308 * memory is updated before giving the doorbell.
3309 * During qedr_poll_cq, rmb is called before accessing the
3310 * cqe. This covers for the smp_rmb as well.
3311 */
3312 smp_wmb();
afa0e13b
RA
3313
3314 qp->rq.db_data.data.value++;
3315
3316 writel(qp->rq.db_data.raw, qp->rq.db);
3317
3318 /* Make sure write sticks */
3319 mmiowb();
3320
f5b1b177
KM
3321 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3322 writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3323 mmiowb(); /* for second doorbell */
3324 }
3325
afa0e13b
RA
3326 wr = wr->next;
3327 }
3328
3329 spin_unlock_irqrestore(&qp->q_lock, flags);
3330
3331 return status;
3332}
3333
3334static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3335{
3336 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3337
3338 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3339 cq->pbl_toggle;
3340}
3341
3342static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3343{
3344 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3345 struct qedr_qp *qp;
3346
3347 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3348 resp_cqe->qp_handle.lo,
3349 u64);
3350 return qp;
3351}
3352
3353static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3354{
3355 struct rdma_cqe_requester *resp_cqe = &cqe->req;
3356
3357 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3358}
3359
3360/* Return latest CQE (needs processing) */
3361static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3362{
3363 return cq->latest_cqe;
3364}
3365
3366/* In FMR we need to increase the FMR completed counter for the FMR
3367 * algorithm that determines whether we can free a PBL or not.
3368 * We need to perform this whether the work request was signaled or not. For
3369 * this purpose we call this function from the condition that checks if a WR
3370 * should be skipped, to make sure we don't miss it (possibly this FMR
3371 * operation was not signaled).
3372 */
3373static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3374{
3375 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3376 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3377}
3378
3379static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3380 struct qedr_cq *cq, int num_entries,
3381 struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3382 int force)
3383{
3384 u16 cnt = 0;
3385
3386 while (num_entries && qp->sq.wqe_cons != hw_cons) {
3387 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3388 qedr_chk_if_fmr(qp);
3389 /* skip WC */
3390 goto next_cqe;
3391 }
3392
3393 /* fill WC */
3394 wc->status = status;
27035a1b 3395 wc->vendor_err = 0;
afa0e13b
RA
3396 wc->wc_flags = 0;
3397 wc->src_qp = qp->id;
3398 wc->qp = &qp->ibqp;
3399
3400 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3401 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3402
3403 switch (wc->opcode) {
3404 case IB_WC_RDMA_WRITE:
3405 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3406 break;
3407 case IB_WC_COMP_SWAP:
3408 case IB_WC_FETCH_ADD:
3409 wc->byte_len = 8;
3410 break;
3411 case IB_WC_REG_MR:
3412 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3413 break;
dac27386
MK
3414 case IB_WC_RDMA_READ:
3415 case IB_WC_SEND:
3416 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3417 break;
afa0e13b
RA
3418 default:
3419 break;
3420 }
3421
3422 num_entries--;
3423 wc++;
3424 cnt++;
3425next_cqe:
3426 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3427 qed_chain_consume(&qp->sq.pbl);
3428 qedr_inc_sw_cons(&qp->sq);
3429 }
3430
3431 return cnt;
3432}
3433
3434static int qedr_poll_cq_req(struct qedr_dev *dev,
3435 struct qedr_qp *qp, struct qedr_cq *cq,
3436 int num_entries, struct ib_wc *wc,
3437 struct rdma_cqe_requester *req)
3438{
3439 int cnt = 0;
3440
3441 switch (req->status) {
3442 case RDMA_CQE_REQ_STS_OK:
3443 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3444 IB_WC_SUCCESS, 0);
3445 break;
3446 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
c78c3149 3447 if (qp->state != QED_ROCE_QP_STATE_ERR)
dc728f77
KM
3448 DP_DEBUG(dev, QEDR_MSG_CQ,
3449 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3450 cq->icid, qp->icid);
afa0e13b 3451 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
74c3875c 3452 IB_WC_WR_FLUSH_ERR, 1);
afa0e13b
RA
3453 break;
3454 default:
3455 /* process all WQEs before the consumer */
3456 qp->state = QED_ROCE_QP_STATE_ERR;
3457 cnt = process_req(dev, qp, cq, num_entries, wc,
3458 req->sq_cons - 1, IB_WC_SUCCESS, 0);
3459 wc += cnt;
3460 /* if we have extra WC fill it with actual error info */
3461 if (cnt < num_entries) {
3462 enum ib_wc_status wc_status;
3463
3464 switch (req->status) {
3465 case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3466 DP_ERR(dev,
3467 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3468 cq->icid, qp->icid);
3469 wc_status = IB_WC_BAD_RESP_ERR;
3470 break;
3471 case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3472 DP_ERR(dev,
3473 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3474 cq->icid, qp->icid);
3475 wc_status = IB_WC_LOC_LEN_ERR;
3476 break;
3477 case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3478 DP_ERR(dev,
3479 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3480 cq->icid, qp->icid);
3481 wc_status = IB_WC_LOC_QP_OP_ERR;
3482 break;
3483 case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3484 DP_ERR(dev,
3485 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3486 cq->icid, qp->icid);
3487 wc_status = IB_WC_LOC_PROT_ERR;
3488 break;
3489 case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3490 DP_ERR(dev,
3491 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3492 cq->icid, qp->icid);
3493 wc_status = IB_WC_MW_BIND_ERR;
3494 break;
3495 case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3496 DP_ERR(dev,
3497 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3498 cq->icid, qp->icid);
3499 wc_status = IB_WC_REM_INV_REQ_ERR;
3500 break;
3501 case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3502 DP_ERR(dev,
3503 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3504 cq->icid, qp->icid);
3505 wc_status = IB_WC_REM_ACCESS_ERR;
3506 break;
3507 case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3508 DP_ERR(dev,
3509 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3510 cq->icid, qp->icid);
3511 wc_status = IB_WC_REM_OP_ERR;
3512 break;
3513 case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3514 DP_ERR(dev,
3515 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3516 cq->icid, qp->icid);
3517 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3518 break;
3519 case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3520 DP_ERR(dev,
3521 "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3522 cq->icid, qp->icid);
3523 wc_status = IB_WC_RETRY_EXC_ERR;
3524 break;
3525 default:
3526 DP_ERR(dev,
3527 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3528 cq->icid, qp->icid);
3529 wc_status = IB_WC_GENERAL_ERR;
3530 }
3531 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3532 wc_status, 1);
3533 }
3534 }
3535
3536 return cnt;
3537}
3538
b6acd71f 3539static inline int qedr_cqe_resp_status_to_ib(u8 status)
afa0e13b 3540{
b6acd71f 3541 switch (status) {
afa0e13b 3542 case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
b6acd71f 3543 return IB_WC_LOC_ACCESS_ERR;
afa0e13b 3544 case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
b6acd71f 3545 return IB_WC_LOC_LEN_ERR;
afa0e13b 3546 case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
b6acd71f 3547 return IB_WC_LOC_QP_OP_ERR;
afa0e13b 3548 case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
b6acd71f 3549 return IB_WC_LOC_PROT_ERR;
afa0e13b 3550 case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
b6acd71f 3551 return IB_WC_MW_BIND_ERR;
afa0e13b 3552 case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
b6acd71f 3553 return IB_WC_REM_INV_RD_REQ_ERR;
afa0e13b 3554 case RDMA_CQE_RESP_STS_OK:
b6acd71f
AR
3555 return IB_WC_SUCCESS;
3556 default:
3557 return IB_WC_GENERAL_ERR;
3558 }
3559}
afa0e13b 3560
b6acd71f
AR
3561static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
3562 struct ib_wc *wc)
3563{
3564 wc->status = IB_WC_SUCCESS;
3565 wc->byte_len = le32_to_cpu(resp->length);
afa0e13b 3566
b6acd71f 3567 if (resp->flags & QEDR_RESP_IMM) {
7bed7ebc 3568 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
b6acd71f
AR
3569 wc->wc_flags |= IB_WC_WITH_IMM;
3570
3571 if (resp->flags & QEDR_RESP_RDMA)
afa0e13b
RA
3572 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3573
b6acd71f
AR
3574 if (resp->flags & QEDR_RESP_INV)
3575 return -EINVAL;
3576
3577 } else if (resp->flags & QEDR_RESP_INV) {
3578 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
3579 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3580
3581 if (resp->flags & QEDR_RESP_RDMA)
3582 return -EINVAL;
3583
3584 } else if (resp->flags & QEDR_RESP_RDMA) {
3585 return -EINVAL;
3586 }
3587
3588 return 0;
3589}
3590
3591static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3592 struct qedr_cq *cq, struct ib_wc *wc,
3593 struct rdma_cqe_responder *resp, u64 wr_id)
3594{
3595 /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
3596 wc->opcode = IB_WC_RECV;
3597 wc->wc_flags = 0;
3598
3599 if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
3600 if (qedr_set_ok_cqe_resp_wc(resp, wc))
3601 DP_ERR(dev,
3602 "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
3603 cq, cq->icid, resp->flags);
3604
3605 } else {
3606 wc->status = qedr_cqe_resp_status_to_ib(resp->status);
3607 if (wc->status == IB_WC_GENERAL_ERR)
3608 DP_ERR(dev,
3609 "CQ %p (icid=%d) contains an invalid CQE status %d\n",
3610 cq, cq->icid, resp->status);
afa0e13b
RA
3611 }
3612
b6acd71f 3613 /* Fill the rest of the WC */
27035a1b 3614 wc->vendor_err = 0;
afa0e13b
RA
3615 wc->src_qp = qp->id;
3616 wc->qp = &qp->ibqp;
3617 wc->wr_id = wr_id;
3618}
3619
3620static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3621 struct qedr_cq *cq, struct ib_wc *wc,
3622 struct rdma_cqe_responder *resp)
3623{
3624 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3625
3626 __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3627
3628 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3629 qed_chain_consume(&qp->rq.pbl);
3630 qedr_inc_sw_cons(&qp->rq);
3631
3632 return 1;
3633}
3634
3635static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
3636 int num_entries, struct ib_wc *wc, u16 hw_cons)
3637{
3638 u16 cnt = 0;
3639
3640 while (num_entries && qp->rq.wqe_cons != hw_cons) {
3641 /* fill WC */
3642 wc->status = IB_WC_WR_FLUSH_ERR;
27035a1b 3643 wc->vendor_err = 0;
afa0e13b
RA
3644 wc->wc_flags = 0;
3645 wc->src_qp = qp->id;
3646 wc->byte_len = 0;
3647 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3648 wc->qp = &qp->ibqp;
3649 num_entries--;
3650 wc++;
3651 cnt++;
3652 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3653 qed_chain_consume(&qp->rq.pbl);
3654 qedr_inc_sw_cons(&qp->rq);
3655 }
3656
3657 return cnt;
3658}
3659
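/* Consume the responder CQE only once the software RQ consumer has caught up
 * with the index carried in the CQE, and note that the CQ doorbell needs to
 * be updated.
 */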
3660static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3661 struct rdma_cqe_responder *resp, int *update)
3662{
50bc60cb 3663 if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
3664 consume_cqe(cq);
3665 *update |= 1;
3666 }
3667}
3668
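/* Handle one responder CQE: a WORK_REQUEST_FLUSHED_ERR status may expand into
 * several flush completions, anything else produces a single one. The CQE
 * itself is consumed once the matching RQ WQE(s) have been accounted for.
 */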
3669static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
3670 struct qedr_cq *cq, int num_entries,
3671 struct ib_wc *wc, struct rdma_cqe_responder *resp,
3672 int *update)
3673{
3674 int cnt;
3675
3676 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3677 cnt = process_resp_flush(qp, cq, num_entries, wc,
50bc60cb 3678 resp->rq_cons_or_srq_id);
3679 try_consume_resp_cqe(cq, qp, resp, update);
3680 } else {
3681 cnt = process_resp_one(dev, qp, cq, wc, resp);
3682 consume_cqe(cq);
3683 *update |= 1;
3684 }
3685
3686 return cnt;
3687}
3688
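/* Requester-side counterpart of try_consume_resp_cqe(): consume the CQE once
 * the SQ consumer matches the index reported in it.
 */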
3689static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3690 struct rdma_cqe_requester *req, int *update)
3691{
3692 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
3693 consume_cqe(cq);
3694 *update |= 1;
3695 }
3696}
3697
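/* The poll_cq verb. GSI CQs are delegated to qedr_gsi_poll_cq(); for all
 * other CQs the valid CQEs are walked under cq_lock, dispatched by type
 * (requester vs. responder), and the CQ consumer doorbell is rung for the
 * last entry that was actually consumed.
 *
 * Illustrative sketch only (not part of this driver): a ULP would typically
 * drain completions through the generic verb, e.g.
 *
 *	struct ib_wc wc[16];
 *	int n;
 *
 *	do {
 *		n = ib_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
 *		...handle wc[0..n-1]...
 *	} while (n > 0);
 */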
3698int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3699{
3700 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3701 struct qedr_cq *cq = get_qedr_cq(ibcq);
e3fd112c 3702 union rdma_cqe *cqe;
3703 u32 old_cons, new_cons;
3704 unsigned long flags;
3705 int update = 0;
3706 int done = 0;
3707
3708 if (cq->destroyed) {
3709 DP_ERR(dev,
3710 "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
3711 cq, cq->icid);
3712 return 0;
3713 }
3714
3715 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
3716 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3717
afa0e13b 3718 spin_lock_irqsave(&cq->cq_lock, flags);
e3fd112c 3719 cqe = cq->latest_cqe;
3720 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3721 while (num_entries && is_valid_cqe(cq, cqe)) {
3722 struct qedr_qp *qp;
3723 int cnt = 0;
3724
3725 /* prevent speculative reads of any field of CQE */
3726 rmb();
3727
3728 qp = cqe_get_qp(cqe);
3729 if (!qp) {
3730 WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
3731 break;
3732 }
3733
3734 wc->qp = &qp->ibqp;
3735
3736 switch (cqe_get_type(cqe)) {
3737 case RDMA_CQE_TYPE_REQUESTER:
3738 cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
3739 &cqe->req);
3740 try_consume_req_cqe(cq, qp, &cqe->req, &update);
3741 break;
3742 case RDMA_CQE_TYPE_RESPONDER_RQ:
3743 cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
3744 &cqe->resp, &update);
3745 break;
3746 case RDMA_CQE_TYPE_INVALID:
3747 default:
3748 DP_ERR(dev, "Error: invalid CQE type = %d\n",
3749 cqe_get_type(cqe));
3750 }
3751 num_entries -= cnt;
3752 wc += cnt;
3753 done += cnt;
3754
3755 cqe = get_cqe(cq);
3756 }
3757 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3758
3759 cq->cq_cons += new_cons - old_cons;
3760
3761 if (update)
 3762 		/* doorbell notifies about the latest VALID entry,
 3763 		 * but the chain already points to the next INVALID one
3764 */
3765 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
3766
3767 spin_unlock_irqrestore(&cq->cq_lock, flags);
3768 return done;
3769}
3770
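/* MAD processing stub: qedr does not handle MADs itself, so the MAD header is
 * only logged for debugging and IB_MAD_RESULT_SUCCESS is returned.
 */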
3771int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
3772 u8 port_num,
3773 const struct ib_wc *in_wc,
3774 const struct ib_grh *in_grh,
3775 const struct ib_mad_hdr *mad_hdr,
3776 size_t in_mad_size, struct ib_mad_hdr *out_mad,
3777 size_t *out_mad_size, u16 *out_mad_pkey_index)
3778{
3779 struct qedr_dev *dev = get_qedr_dev(ibdev);
3780
3781 DP_DEBUG(dev, QEDR_MSG_GSI,
3782 "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
3783 mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
3784 mad_hdr->class_specific, mad_hdr->class_version,
3785 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
3786 return IB_MAD_RESULT_SUCCESS;
3787}