RDMA/ocrdma: Remove hardcoding of the max DPP QPs supported
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_abi.h"

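/* RoCE exposes a single, full-membership P_Key (0xffff) and a GID table
 * (dev->sgid_tbl) that the driver keeps up to date; the query helpers
 * below simply report those tables.
 */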
int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	/* only one P_Key is exposed (max_pkeys = 1), so index 0 alone is valid */
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
		     int index, union ib_gid *sgid)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	memset(sgid, 0, sizeof(*sgid));
	/* sgid_tbl holds OCRDMA_MAX_SGID entries; reject out-of-range indices */
	if (index >= OCRDMA_MAX_SGID)
		return -EINVAL;

	memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));

	return 0;
}

int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

	memset(attr, 0, sizeof(*attr));
	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
	ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
	attr->max_mr_size = ~0ull;
	attr->page_size_cap = 0xffff000;
	attr->vendor_id = dev->nic_info.pdev->vendor;
	attr->vendor_part_id = dev->nic_info.pdev->device;
	attr->hw_ver = 0;
	attr->max_qp = dev->attr.max_qp;
	attr->max_ah = OCRDMA_MAX_AH;
	attr->max_qp_wr = dev->attr.max_wqe;

	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
					IB_DEVICE_RC_RNR_NAK_GEN |
					IB_DEVICE_SHUTDOWN_PORT |
					IB_DEVICE_SYS_IMAGE_GUID |
					IB_DEVICE_LOCAL_DMA_LKEY |
					IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
	attr->max_sge_rd = 0;
	attr->max_cq = dev->attr.max_cq;
	attr->max_cqe = dev->attr.max_cqe;
	attr->max_mr = dev->attr.max_mr;
	attr->max_mw = dev->attr.max_mw;
	attr->max_pd = dev->attr.max_pd;
	attr->atomic_cap = 0;
	attr->max_fmr = 0;
	attr->max_map_per_fmr = 0;
	attr->max_qp_rd_atom =
	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
	attr->max_srq = dev->attr.max_srq;
	attr->max_srq_sge = dev->attr.max_srq_sge;
	attr->max_srq_wr = dev->attr.max_rqe;
	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
	attr->max_fast_reg_page_list_len = 0;
	attr->max_pkeys = 1;
	return 0;
}

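/* Translate the PHY link speed reported by firmware into the closest
 * (IB speed, IB width) pair: 1G maps to SDR x1, 10G to QDR x1, 20G to
 * DDR x4, and 40G to QDR x4; anything unrecognized is reported
 * conservatively as SDR x1.
 */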
static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
					    u8 *ib_speed, u8 *ib_width)
{
	int status;
	u8 speed;

	status = ocrdma_mbx_get_link_speed(dev, &speed);
	if (status)
		speed = OCRDMA_PHYS_LINK_SPEED_ZERO;

	switch (speed) {
	case OCRDMA_PHYS_LINK_SPEED_1GBPS:
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_10GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_20GBPS:
		*ib_speed = IB_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_40GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

int ocrdma_query_port(struct ib_device *ibdev,
		      u8 port, struct ib_port_attr *props)
{
	enum ib_port_state port_state;
	struct ocrdma_dev *dev;
	struct net_device *netdev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__,
		       dev->id, port);
		return -EINVAL;
	}
	netdev = dev->nic_info.netdev;
	if (netif_running(netdev) && netif_oper_up(netdev)) {
		port_state = IB_PORT_ACTIVE;
		props->phys_state = 5;	/* IBTA phys port state: LinkUp */
	} else {
		port_state = IB_PORT_DOWN;
		props->phys_state = 3;	/* IBTA phys port state: Disabled */
	}
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(netdev->mtu);
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = port_state;
	props->port_cap_flags = IB_PORT_CM_SUP |
				IB_PORT_REINIT_SUP |
				IB_PORT_DEVICE_MGMT_SUP |
				IB_PORT_VENDOR_CLASS_SUP |
				IB_PORT_IP_BASED_GIDS;
	props->gid_tbl_len = OCRDMA_MAX_SGID;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	get_link_speed_and_width(dev, &props->active_speed,
				 &props->active_width);
	props->max_msg_sz = 0x80000000;
	props->max_vl_num = 4;
	return 0;
}

int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		       struct ib_port_modify *props)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(ibdev);
	if (port > 1) {
		pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
		return -EINVAL;
	}
	return 0;
}

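/* Userspace reaches doorbell and DPP pages by mmap()ing offsets that
 * the driver hands out in the various *_uresp structures.  Each
 * ucontext keeps a list of the (physical address, length) pairs it has
 * advertised; ocrdma_mmap() later refuses any request that was not
 * registered here first.
 */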
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			   unsigned long len)
{
	struct ocrdma_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return -ENOMEM;
	mm->key.phy_addr = phy_addr;
	mm->key.len = len;
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add_tail(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);
	return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			    unsigned long len)
{
	struct ocrdma_mm *mm, *tmp;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		/* skip anything that is not an exact (addr, len) match */
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		list_del(&mm->entry);
		kfree(mm);
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			       unsigned long len)
{
	bool found = false;
	struct ocrdma_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		/* skip anything that is not an exact (addr, len) match */
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	return found;
}

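/* DPP lets the userspace library write small WQEs straight into a
 * write-combined adapter page instead of having the HCA fetch them by
 * DMA.  On SKH_R ASICs the number of DPP QPs per PD is no longer
 * hardcoded: it is however many WQE slots fit in one doorbell page,
 * i.e. db_page_size / wqe_size (e.g. a 4K doorbell page with 256-byte
 * WQEs would give 16 DPP QPs; the numbers are only illustrative).  If
 * the firmware cannot allocate a DPP PD, the allocation is retried
 * with DPP disabled.
 */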
static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
					  struct ocrdma_ucontext *uctx,
					  struct ib_udata *udata)
{
	struct ocrdma_pd *pd = NULL;
	int status = 0;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	if (udata && uctx) {
		pd->dpp_enabled =
			ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
		pd->num_dpp_qp =
			pd->dpp_enabled ? (dev->nic_info.db_page_size /
					   dev->attr.wqe_size) : 0;
	}

retry:
	status = ocrdma_mbx_alloc_pd(dev, pd);
	if (status) {
		if (pd->dpp_enabled) {
			pd->dpp_enabled = false;
			pd->num_dpp_qp = 0;
			goto retry;
		} else {
			kfree(pd);
			return ERR_PTR(status);
		}
	}

	return pd;
}

static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
				 struct ocrdma_pd *pd)
{
	return uctx->cntxt_pd == pd;
}

static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
			      struct ocrdma_pd *pd)
{
	int status;

	status = ocrdma_mbx_dealloc_pd(dev, pd);
	kfree(pd);
	return status;
}

static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
				    struct ocrdma_ucontext *uctx,
				    struct ib_udata *udata)
{
	int status = 0;

	uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
	if (IS_ERR(uctx->cntxt_pd)) {
		status = PTR_ERR(uctx->cntxt_pd);
		uctx->cntxt_pd = NULL;
		goto err;
	}

	uctx->cntxt_pd->uctx = uctx;
	uctx->cntxt_pd->ibpd.device = &dev->ibdev;
err:
	return status;
}

static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	int status;
	struct ocrdma_pd *pd = uctx->cntxt_pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	BUG_ON(uctx->pd_in_use);
	uctx->cntxt_pd = NULL;
	status = _ocrdma_dealloc_pd(dev, pd);
	return status;
}

static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	struct ocrdma_pd *pd = NULL;

	mutex_lock(&uctx->mm_list_lock);
	if (!uctx->pd_in_use) {
		uctx->pd_in_use = true;
		pd = uctx->cntxt_pd;
	}
	mutex_unlock(&uctx->mm_list_lock);

	return pd;
}

static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	mutex_lock(&uctx->mm_list_lock);
	uctx->pd_in_use = false;
	mutex_unlock(&uctx->mm_list_lock);
}

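/* A ucontext owns a DMA-coherent AH table (2048 entries) shared with
 * userspace via mmap, a dedicated "context PD", and the list of
 * regions userspace may map.  The error paths below unwind in the
 * reverse order of setup.
 */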
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
					  struct ib_udata *udata)
{
	int status;
	struct ocrdma_ucontext *ctx;
	struct ocrdma_alloc_ucontext_resp resp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

	if (!udata)
		return ERR_PTR(-EFAULT);
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
					    &ctx->ah_tbl.pa, GFP_KERNEL);
	if (!ctx->ah_tbl.va) {
		kfree(ctx);
		return ERR_PTR(-ENOMEM);
	}
	memset(ctx->ah_tbl.va, 0, map_len);
	ctx->ah_tbl.len = map_len;

	memset(&resp, 0, sizeof(resp));
	resp.ah_tbl_len = ctx->ah_tbl.len;
	resp.ah_tbl_page = ctx->ah_tbl.pa;

	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
	if (status)
		goto map_err;

	status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
	if (status)
		goto pd_err;

	resp.dev_id = dev->id;
	resp.max_inline_data = dev->attr.max_inline_data;
	resp.wqe_size = dev->attr.wqe_size;
	resp.rqe_size = dev->attr.rqe_size;
	resp.dpp_wqe_size = dev->attr.wqe_size;

	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (status)
		goto cpy_err;
	return &ctx->ibucontext;

cpy_err:
	/* don't leak the context PD if the copy to userspace failed */
	ocrdma_dealloc_ucontext_pd(ctx);
pd_err:
	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
			  ctx->ah_tbl.pa);
	kfree(ctx);
	return ERR_PTR(status);
}

int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	int status;
	struct ocrdma_mm *mm, *tmp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
	struct pci_dev *pdev = dev->nic_info.pdev;

	status = ocrdma_dealloc_ucontext_pd(uctx);

	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
			  uctx->ah_tbl.pa);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		list_del(&mm->entry);
		kfree(mm);
	}
	kfree(uctx);
	return status;
}

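/* Three kinds of regions may be mapped, distinguished by offset:
 * doorbell pages (mapped non-cached, write-only), DPP pages (mapped
 * write-combined, write-only), and ordinary queue memory (a regular
 * cached mapping).  The offset must have been registered against this
 * ucontext via ocrdma_add_mmap().
 */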
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
	struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int status = 0;
	bool found;

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;
	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
	if (!found)
		return -EINVAL;

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
		dev->nic_info.db_total_size)) &&
		(len <= dev->nic_info.db_page_size)) {
		/* doorbell pages: non-cached, never readable */
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else if (dev->nic_info.dpp_unmapped_len &&
		(vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
			dev->nic_info.dpp_unmapped_len)) &&
		(len <= dev->nic_info.dpp_unmapped_len)) {
		/* DPP pages: write-combined so pushed WQEs go out in bursts */
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else {
		/* ordinary queue memory */
		status = remap_pfn_range(vma, vma->vm_start,
					 vma->vm_pgoff, len, vma->vm_page_prot);
	}
	return status;
}

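/* Tell userspace about a freshly allocated PD: its id, whether DPP is
 * on, and (if so) where the PD's DPP page lives.  Both the doorbell
 * page and the DPP page are registered as legal mmap targets before
 * the response is copied out, and deregistered again on failure.
 */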
static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
				struct ib_ucontext *ib_ctx,
				struct ib_udata *udata)
{
	int status;
	u64 db_page_addr;
	u64 dpp_page_addr = 0;
	u32 db_page_size;
	struct ocrdma_alloc_pd_uresp rsp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);

	memset(&rsp, 0, sizeof(rsp));
	rsp.id = pd->id;
	rsp.dpp_enabled = pd->dpp_enabled;
	db_page_addr = ocrdma_get_db_addr(dev, pd->id);
	db_page_size = dev->nic_info.db_page_size;

	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
	if (status)
		return status;

	if (pd->dpp_enabled) {
		dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
				(pd->id * PAGE_SIZE);
		status = ocrdma_add_mmap(uctx, dpp_page_addr, PAGE_SIZE);
		if (status)
			goto dpp_map_err;
		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
		rsp.dpp_page_addr_lo = dpp_page_addr;
	}

	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
	if (status)
		goto ucopy_err;

	pd->uctx = uctx;
	return 0;

ucopy_err:
	/* pd->uctx is not set yet on this path; use the local uctx */
	if (pd->dpp_enabled)
		ocrdma_del_mmap(uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
	ocrdma_del_mmap(uctx, db_page_addr, db_page_size);
	return status;
}

struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_pd *pd;
	struct ocrdma_ucontext *uctx = NULL;
	int status;
	bool is_uctx_pd = false;

	if (udata && context) {
		uctx = get_ocrdma_ucontext(context);
		pd = ocrdma_get_ucontext_pd(uctx);
		if (pd) {
			is_uctx_pd = true;
			goto pd_mapping;
		}
	}

	pd = _ocrdma_alloc_pd(dev, uctx, udata);
	if (IS_ERR(pd)) {
		status = PTR_ERR(pd);
		goto exit;
	}

pd_mapping:
	if (udata && context) {
		status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
		if (status)
			goto err;
	}
	return &pd->ibpd;

err:
	if (is_uctx_pd) {
		ocrdma_release_ucontext_pd(uctx);
	} else {
		/* preserve the original error; a successful dealloc would
		 * otherwise turn the return into ERR_PTR(0), i.e. NULL
		 */
		ocrdma_mbx_dealloc_pd(dev, pd);
		kfree(pd);
	}
exit:
	return ERR_PTR(status);
}

int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_ucontext *uctx = NULL;
	int status = 0;
	u64 usr_db;

	uctx = pd->uctx;
	if (uctx) {
		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
			(pd->id * PAGE_SIZE);
		if (pd->dpp_enabled)
			ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
		usr_db = ocrdma_get_db_addr(dev, pd->id);
		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);

		if (is_ucontext_pd(uctx, pd)) {
			ocrdma_release_ucontext_pd(uctx);
			return status;
		}
	}
	status = _ocrdma_dealloc_pd(dev, pd);
	return status;
}

static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			     u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
	int status;

	mr->hwmr.fr_mr = 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hwmr.num_pbls = num_pbls;

	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
	if (status)
		return status;

	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return 0;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
		pr_err("%s err, invalid access rights\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
				   OCRDMA_ADDR_CHECK_DISABLE);
	if (status) {
		kfree(mr);
		return ERR_PTR(status);
	}

	return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
				   struct ocrdma_hw_mr *mr)
{
	struct pci_dev *pdev = dev->nic_info.pdev;
	int i = 0;

	if (mr->pbl_table) {
		for (i = 0; i < mr->num_pbls; i++) {
			if (!mr->pbl_table[i].va)
				continue;
			dma_free_coherent(&pdev->dev, mr->pbl_size,
					  mr->pbl_table[i].va,
					  mr->pbl_table[i].pa);
		}
		kfree(mr->pbl_table);
		mr->pbl_table = NULL;
	}
}

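/* Pick a PBL (physical buffer list) geometry for num_pbes page
 * entries: start from the minimum hugepage-sized PBL and keep doubling
 * the PBL size until the number of PBLs needed fits under the device's
 * max_num_mr_pbl limit.  Each PBL holds pbl_size / 8 PBEs, so e.g.
 * 64K pages described by 8-byte PBEs in 4K PBLs would need
 * DIV_ROUND_UP(65536, 512) = 128 PBLs (numbers illustrative).
 */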
static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			       u32 num_pbes)
{
	u32 num_pbls = 0;
	u32 idx = 0;
	int status = 0;
	u32 pbl_size;

	do {
		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
			status = -EFAULT;
			break;
		}
		num_pbls = DIV_ROUND_UP(num_pbes, pbl_size / sizeof(u64));
		idx++;
	} while (num_pbls >= dev->attr.max_num_mr_pbl);

	mr->hwmr.num_pbes = num_pbes;
	mr->hwmr.num_pbls = num_pbls;
	mr->hwmr.pbl_size = pbl_size;
	return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
	int status = 0;
	int i;
	u32 dma_len = mr->pbl_size;
	struct pci_dev *pdev = dev->nic_info.pdev;
	void *va;
	dma_addr_t pa;

	mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
				GFP_KERNEL);
	if (!mr->pbl_table)
		return -ENOMEM;

	for (i = 0; i < mr->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
		if (!va) {
			ocrdma_free_mr_pbl_tbl(dev, mr);
			status = -ENOMEM;
			break;
		}
		memset(va, 0, dma_len);
		mr->pbl_table[i].va = va;
		mr->pbl_table[i].pa = pa;
	}
	return status;
}

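/* Walk the pinned umem's DMA scatterlist and write one little-endian
 * 64-bit PBE (split into pa_lo/pa_hi) per device page, spilling into
 * the next PBL whenever the current one fills up.
 */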
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			    u32 num_pbes)
{
	struct ocrdma_pbe *pbe;
	struct scatterlist *sg;
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ib_umem *umem = mr->umem;
	int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;

	if (!mr->hwmr.num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	pbe_cnt = 0;

	shift = ilog2(umem->page_size);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> shift;
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			/* store the page address in pbe */
			pbe->pa_lo = cpu_to_le32(sg_dma_address(sg) +
						 (umem->page_size * pg_cnt));
			pbe->pa_hi = cpu_to_le32(upper_32_bits(sg_dma_address(sg) +
						 (umem->page_size * pg_cnt)));
			pbe_cnt += 1;
			total_num_pbes += 1;
			pbe++;

			/* if done building pbes, issue the mbx cmd. */
			if (total_num_pbes == num_pbes)
				return;

			/* if the given pbl is full storing the pbes,
			 * move to next pbl.
			 */
			if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
				pbl_tbl++;
				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
				pbe_cnt = 0;
			}
		}
	}
}

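/* User MR registration pipeline: pin the user range with
 * ib_umem_get(), size the PBL table for the resulting page count,
 * allocate and fill the PBLs, then register the whole thing with the
 * firmware via ocrdma_reg_mr().
 */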
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				 u64 usr_addr, int acc, struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd;
	u32 num_pbes;

	pd = get_ocrdma_pd(ibpd);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);
	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
	if (IS_ERR(mr->umem)) {
		status = -EFAULT;
		goto umem_err;
	}
	num_pbes = ib_umem_page_count(mr->umem);
	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
	if (status)
		goto umem_err;

	mr->hwmr.pbe_size = mr->umem->page_size;
	mr->hwmr.fbo = mr->umem->offset;
	mr->hwmr.va = usr_addr;
	mr->hwmr.len = len;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto umem_err;
	build_user_pbes(dev, mr, num_pbes);
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
	if (status)
		goto mbx_err;
	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;

	return &mr->ibmr;

mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
	/* drop the pinned pages if the umem was successfully created */
	if (!IS_ERR_OR_NULL(mr->umem))
		ib_umem_release(mr->umem);
	kfree(mr);
	return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
	int status;

	status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

	/* it could be user registered memory. */
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
	return status;
}

static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
				struct ib_udata *udata,
				struct ib_ucontext *ib_ctx)
{
	int status;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
	struct ocrdma_create_cq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	uresp.cq_id = cq->id;
	uresp.page_size = PAGE_ALIGN(cq->len);
	uresp.num_pages = 1;
	uresp.max_hw_cqe = cq->max_hw_cqe;
	uresp.page_addr[0] = cq->pa;
	uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.phase_change = cq->phase_change ? 1 : 0;
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) copy error cqid=0x%x.\n",
		       __func__, dev->id, cq->id);
		goto err;
	}
	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
	if (status)
		goto err;
	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
	if (status) {
		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
		goto err;
	}
	cq->ucontext = uctx;
err:
	return status;
}

struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
			       struct ib_ucontext *ib_ctx,
			       struct ib_udata *udata)
{
	struct ocrdma_cq *cq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_ucontext *uctx = NULL;
	u16 pd_id = 0;
	int status;
	struct ocrdma_create_cq_ureq ureq;

	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	} else {
		ureq.dpp_cq = 0;
	}
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&cq->cq_lock);
	spin_lock_init(&cq->comp_handler_lock);
	INIT_LIST_HEAD(&cq->sq_head);
	INIT_LIST_HEAD(&cq->rq_head);
	cq->first_arm = true;

	if (ib_ctx) {
		uctx = get_ocrdma_ucontext(ib_ctx);
		pd_id = uctx->cntxt_pd->id;
	}

	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
	if (status) {
		kfree(cq);
		return ERR_PTR(status);
	}
	if (ib_ctx) {
		status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
		if (status)
			goto ctx_err;
	}
	cq->phase = OCRDMA_CQE_VALID;
	dev->cq_tbl[cq->id] = cq;
	return &cq->ibcq;

ctx_err:
	ocrdma_mbx_destroy_cq(dev, cq);
	kfree(cq);
	return ERR_PTR(status);
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
		     struct ib_udata *udata)
{
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe)
		return -EINVAL;

	ibcq->cqe = new_cnt;
	return 0;
}

static void ocrdma_flush_cq(struct ocrdma_cq *cq)
{
	int cqe_cnt;
	int valid_count = 0;
	unsigned long flags;

	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
	struct ocrdma_cqe *cqe = NULL;

	cqe = cq->va;
	cqe_cnt = cq->cqe_cnt;

	/* The last irq might have scheduled a polling thread;
	 * sync up with it before hard flushing.
	 */
	spin_lock_irqsave(&cq->cq_lock, flags);
	while (cqe_cnt) {
		if (is_cqe_valid(cq, cqe))
			valid_count++;
		cqe++;
		cqe_cnt--;
	}
	ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}

int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
	int status;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_eq *eq = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int pdid = 0;
	u32 irq, indx;

	dev->cq_tbl[cq->id] = NULL;
	indx = ocrdma_get_eq_table_index(dev, cq->eqn);
	if (indx == -EINVAL)
		BUG();

	eq = &dev->eq_tbl[indx];
	irq = ocrdma_get_irq(dev, eq);
	synchronize_irq(irq);
	ocrdma_flush_cq(cq);

	status = ocrdma_mbx_destroy_cq(dev, cq);
	if (cq->ucontext) {
		pdid = cq->ucontext->cntxt_pd->id;
		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
				PAGE_ALIGN(cq->len));
		ocrdma_del_mmap(cq->ucontext,
				ocrdma_get_db_addr(dev, pdid),
				dev->nic_info.db_page_size);
	}

	kfree(cq);
	return status;
}

static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -EINVAL;

	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
		dev->qp_tbl[qp->id] = qp;
		status = 0;
	}
	return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	dev->qp_tbl[qp->id] = NULL;
}

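/* Validate a create-QP request against device limits before touching
 * hardware: QP type, WQE/RQE depths, SGE counts, and inline data size
 * all have firmware-reported maxima, and GSI (QP1) gets special
 * treatment: only one may exist, it cannot be created from userspace,
 * and regular QPs may not share its CQs.
 */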
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
				  struct ib_qp_init_attr *attrs)
{
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->qp_type != IB_QPT_RC) &&
	    (attrs->qp_type != IB_QPT_UC) &&
	    (attrs->qp_type != IB_QPT_UD)) {
		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
		       __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* Skip the check for QP1 to support CM size of 128 */
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_wr);
		pr_err("%s(%d) supported send_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_wqe);
		return -EINVAL;
	}
	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_wr);
		pr_err("%s(%d) supported recv_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_rqe);
		return -EINVAL;
	}
	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_inline_data);
		pr_err("%s(%d) supported inline data size=0x%x\n",
		       __func__, dev->id, dev->attr.max_inline_data);
		return -EINVAL;
	}
	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_sge);
		pr_err("%s(%d) supported send_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_send_sge);
		return -EINVAL;
	}
	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_sge);
		pr_err("%s(%d) supported recv_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_recv_sge);
		return -EINVAL;
	}
	/* unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		pr_err("%s(%d) Userspace can't create special QPs of type=0x%x\n",
		       __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* allow creating only one GSI type of QP */
	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
		pr_err("%s(%d) GSI special QPs already created.\n",
		       __func__, dev->id);
		return -EINVAL;
	}
	/* verify consumer QPs are not trying to use GSI QP's CQ */
	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
			       __func__, dev->id);
			return -EINVAL;
		}
	}
	return 0;
}

static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
				struct ib_udata *udata, int dpp_offset,
				int dpp_credit_lmt, int srq)
{
	int status = 0;
	u64 usr_db;
	struct ocrdma_create_qp_uresp uresp;
	struct ocrdma_dev *dev = qp->dev;
	struct ocrdma_pd *pd = qp->pd;

	memset(&uresp, 0, sizeof(uresp));
	usr_db = dev->nic_info.unmapped_db +
			(pd->id * dev->nic_info.db_page_size);
	uresp.qp_id = qp->id;
	uresp.sq_dbid = qp->sq.dbid;
	uresp.num_sq_pages = 1;
	uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
	uresp.sq_page_addr[0] = qp->sq.pa;
	uresp.num_wqe_allocated = qp->sq.max_cnt;
	if (!srq) {
		uresp.rq_dbid = qp->rq.dbid;
		uresp.num_rq_pages = 1;
		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
		uresp.rq_page_addr[0] = qp->rq.pa;
		uresp.num_rqe_allocated = qp->rq.max_cnt;
	}
	uresp.db_page_addr = usr_db;
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
	uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
	uresp.db_shift = OCRDMA_DB_RQ_SHIFT;

	if (qp->dpp_enabled) {
		uresp.dpp_credit = dpp_credit_lmt;
		uresp.dpp_offset = dpp_offset;
	}
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
		goto err;
	}
	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
				 uresp.sq_page_size);
	if (status)
		goto err;

	if (!srq) {
		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
					 uresp.rq_page_size);
		if (status)
			goto rq_map_err;
	}
	return status;
rq_map_err:
	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
	return status;
}

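/* Each PD owns one doorbell page (indexed by pd->id); the SQ and RQ
 * doorbells live at fixed offsets inside it, and those offsets differ
 * between the GEN2/SKH_R register layout and the older one.
 */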
static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			     struct ocrdma_pd *pd)
{
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_RQ_OFFSET;
	} else {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_RQ_OFFSET;
	}
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
	qp->wqe_wr_id_tbl = kcalloc(qp->sq.max_cnt, sizeof(*qp->wqe_wr_id_tbl),
				    GFP_KERNEL);
	if (qp->wqe_wr_id_tbl == NULL)
		return -ENOMEM;
	qp->rqe_wr_id_tbl = kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
	if (qp->rqe_wr_id_tbl == NULL)
		return -ENOMEM;

	return 0;
}

static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
				      struct ocrdma_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;
	spin_lock_init(&qp->q_lock);
	INIT_LIST_HEAD(&qp->sq_entry);
	INIT_LIST_HEAD(&qp->rq_entry);

	qp->qp_type = attrs->qp_type;
	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	qp->state = OCRDMA_QPS_RST;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
				   struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type == IB_QPT_GSI) {
		dev->gsi_qp_created = 1;
		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
	}
}

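/* QP creation sequence: validate the request, take the device mutex,
 * create the QP through the mailbox (optionally binding it to a DPP
 * CQ requested by userspace), set up wr_id tables for kernel QPs,
 * install the QPN->QP mapping, program the doorbell addresses, and
 * finally copy the mmap-able resources to userspace.  The error paths
 * unwind in reverse.
 */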
struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *udata)
{
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_create_qp_ureq ureq;
	u16 dpp_credit_lmt, dpp_offset;

	status = ocrdma_check_qp_params(ibpd, dev, attrs);
	if (status)
		goto gen_err;

	memset(&ureq, 0, sizeof(ureq));
	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		status = -ENOMEM;
		goto gen_err;
	}
	qp->dev = dev;
	ocrdma_set_qp_init_params(qp, pd, attrs);
	if (udata == NULL)
		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
					OCRDMA_QP_FAST_REG);

	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
					ureq.dpp_cq_id,
					&dpp_offset, &dpp_credit_lmt);
	if (status)
		goto mbx_err;

	/* user-space QPs' wr_id tables are managed by the library */
	if (udata == NULL) {
		status = ocrdma_alloc_wr_id_tbl(qp);
		if (status)
			goto map_err;
	}

	status = ocrdma_add_qpn_map(dev, qp);
	if (status)
		goto map_err;
	ocrdma_set_qp_db(dev, qp, pd);
	if (udata) {
		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
					      dpp_credit_lmt,
					      (attrs->srq != NULL));
		if (status)
			goto cpy_err;
	}
	ocrdma_store_gsi_qp_cq(dev, attrs);
	qp->ibqp.qp_num = qp->id;
	mutex_unlock(&dev->dev_lock);
	return &qp->ibqp;

cpy_err:
	ocrdma_del_qpn_map(dev, qp);
map_err:
	ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
	mutex_unlock(&dev->dev_lock);
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
	return ERR_PTR(status);
}

int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask)
{
	int status = 0;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;
	if (attr_mask & IB_QP_STATE)
		status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
	/* if the new and previous states are the same, the hw doesn't
	 * need to know about it.
	 */
	if (status < 0)
		return status;
	status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);

	return status;
}

int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	unsigned long flags;
	int status = -EINVAL;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps, new_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = qp->dev;

	/* synchronize with multiple contexts trying to change/retrieve qps */
	mutex_lock(&dev->dev_lock);
	/* synchronize with wqe, rqe posting and cqe processing contexts */
	spin_lock_irqsave(&qp->q_lock, flags);
	old_qps = get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qps = attr->qp_state;
	else
		new_qps = old_qps;
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_ETHERNET)) {
		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
		       old_qps, new_qps);
		goto param_err;
	}

	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
	if (status > 0)
		status = 0;
param_err:
	mutex_unlock(&dev->dev_lock);
	return status;
}

static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
{
	switch (mtu) {
	case 256:
		return IB_MTU_256;
	case 512:
		return IB_MTU_512;
	case 1024:
		return IB_MTU_1024;
	case 2048:
		return IB_MTU_2048;
	case 4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_1024;
	}
}

static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
{
	int ib_qp_acc_flags = 0;

	if (qp_cap_flags & OCRDMA_QP_INB_WR)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
	if (qp_cap_flags & OCRDMA_QP_INB_RD)
		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
	return ib_qp_acc_flags;
}

1387 int ocrdma_query_qp(struct ib_qp *ibqp,
1388                     struct ib_qp_attr *qp_attr,
1389                     int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1390 {
1391         int status;
1392         u32 qp_state;
1393         struct ocrdma_qp_params params;
1394         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1395         struct ocrdma_dev *dev = qp->dev;
1396
1397         memset(&params, 0, sizeof(params));
1398         mutex_lock(&dev->dev_lock);
1399         status = ocrdma_mbx_query_qp(dev, qp, &params);
1400         mutex_unlock(&dev->dev_lock);
1401         if (status)
1402                 goto mbx_err;
1403         qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
1404         qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
1405         qp_attr->path_mtu =
1406                 ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
1407                                 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
1408                                 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
1409         qp_attr->path_mig_state = IB_MIG_MIGRATED;
1410         qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1411         qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1412         qp_attr->dest_qp_num =
1413             params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1414
1415         qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1416         qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1417         qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1418         qp_attr->cap.max_send_sge = qp->sq.max_sges;
1419         qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1420         qp_attr->cap.max_inline_data = qp->max_inline_data;
1421         qp_init_attr->cap = qp_attr->cap;
1422         memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
1423                sizeof(params.dgid));
1424         qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
1425             OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
1426         qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
1427         qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
1428                                           OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1429                                                 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
1430         qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
1431                                               OCRDMA_QP_PARAMS_TCLASS_MASK) >>
1432                                                 OCRDMA_QP_PARAMS_TCLASS_SHIFT;
1433
1434         qp_attr->ah_attr.ah_flags = IB_AH_GRH;
1435         qp_attr->ah_attr.port_num = 1;
1436         qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
1437                                OCRDMA_QP_PARAMS_SL_MASK) >>
1438                                 OCRDMA_QP_PARAMS_SL_SHIFT;
1439         qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1440                             OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1441                                 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1442         qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1443                               OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1444                                 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1445         qp_attr->retry_cnt =
1446             (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1447                 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1448         qp_attr->min_rnr_timer = 0;
1449         qp_attr->pkey_index = 0;
1450         qp_attr->port_num = 1;
1451         qp_attr->ah_attr.src_path_bits = 0;
1452         qp_attr->ah_attr.static_rate = 0;
1453         qp_attr->alt_pkey_index = 0;
1454         qp_attr->alt_port_num = 0;
1455         qp_attr->alt_timeout = 0;
1456         memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1459         qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1460         qp_attr->max_dest_rd_atomic =
1461             params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1462         qp_attr->max_rd_atomic =
1463             params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1464         qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1465                                 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1466 mbx_err:
1467         return status;
1468 }
1469
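/* idx_bit_fields is a bitmap over SRQ RQE indices: a set bit marks a free
 * index, a cleared bit an in-use one (the table is memset to 0xff when the
 * SRQ is created, i.e. everything starts free). Toggling flips one index
 * between the two states; e.g. idx = 37 lands in word 37 / 32 = 1 under
 * mask 1 << (37 % 32) = 1 << 5.
 */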
1470 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
1471 {
1472         int i = idx / 32;
1473         unsigned int mask = (1 << (idx % 32));
1474
1475         if (srq->idx_bit_fields[i] & mask)
1476                 srq->idx_bit_fields[i] &= ~mask;
1477         else
1478                 srq->idx_bit_fields[i] |= mask;
1479 }
1480
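/* Free-slot count for a circular hw queue, assuming max_wqe_idx is
 * max_cnt - 1 (the queues are sized to a power of two), so one slot is
 * always kept unused to distinguish full from empty. For example, with
 * max_cnt = 16, head = 5 and tail = 2: ((15 - 5) + 2) % 16 = 12 free slots.
 */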
1481 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1482 {
1483         return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
1484 }
1485
1486 static int is_hw_sq_empty(struct ocrdma_qp *qp)
1487 {
1488         return (qp->sq.tail == qp->sq.head);
1489 }
1490
1491 static int is_hw_rq_empty(struct ocrdma_qp *qp)
1492 {
1493         return (qp->rq.tail == qp->rq.head);
1494 }
1495
1496 static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1497 {
1498         return q->va + (q->head * q->entry_size);
1499 }
1500
1501 static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1502                                       u32 idx)
1503 {
1504         return q->va + (idx * q->entry_size);
1505 }
1506
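/* head/tail advance by masking rather than a modulo; this relies on the
 * queue depth being a power of two with max_wqe_idx == max_cnt - 1
 * (e.g. head = 15, max_wqe_idx = 15: (15 + 1) & 15 wraps back to 0).
 */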
1507 static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1508 {
1509         q->head = (q->head + 1) & q->max_wqe_idx;
1510 }
1511
1512 static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1513 {
1514         q->tail = (q->tail + 1) & q->max_wqe_idx;
1515 }
1516
1517 /* discard the cqe for a given QP */
1518 static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1519 {
1520         unsigned long cq_flags;
1521         unsigned long flags;
1522         int discard_cnt = 0;
1523         u32 cur_getp, stop_getp;
1524         struct ocrdma_cqe *cqe;
1525         u32 qpn = 0, wqe_idx = 0;
1526
1527         spin_lock_irqsave(&cq->cq_lock, cq_flags);
1528
        /* traverse the CQEs in the hw CQ, find the CQEs that match the
         * given qp, and mark each match as discarded by clearing its qpn.
         * The doorbell is rung in poll_cq() since CQEs are never
         * completed out of order.
         */
1535
1536         cur_getp = cq->getp;
        /* find up to where we reap the cq. */
1538         stop_getp = cur_getp;
1539         do {
1540                 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1541                         break;
1542
1543                 cqe = cq->va + cur_getp;
                /* exit when (a) the whole hw cq has been reaped, or
                 * (b) the qp's SQ/RQ becomes empty.
                 */
1548                 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1549                 /* if previously discarded cqe found, skip that too. */
1550                 /* check for matching qp */
1551                 if (qpn == 0 || qpn != qp->id)
1552                         goto skip_cqe;
1553
1554                 if (is_cqe_for_sq(cqe)) {
1555                         ocrdma_hwq_inc_tail(&qp->sq);
1556                 } else {
1557                         if (qp->srq) {
1558                                 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
1559                                         OCRDMA_CQE_BUFTAG_SHIFT) &
1560                                         qp->srq->rq.max_wqe_idx;
1561                                 if (wqe_idx < 1)
1562                                         BUG();
1563                                 spin_lock_irqsave(&qp->srq->q_lock, flags);
1564                                 ocrdma_hwq_inc_tail(&qp->srq->rq);
1565                                 ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
1566                                 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1567
1568                         } else {
1569                                 ocrdma_hwq_inc_tail(&qp->rq);
1570                         }
1571                 }
1572                 /* mark cqe discarded so that it is not picked up later
1573                  * in the poll_cq().
1574                  */
1575                 discard_cnt += 1;
1576                 cqe->cmn.qpn = 0;
1577 skip_cqe:
1578                 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1579         } while (cur_getp != stop_getp);
1580         spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1581 }
1582
1583 void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1584 {
1585         int found = false;
1586         unsigned long flags;
1587         struct ocrdma_dev *dev = qp->dev;
1588         /* sync with any active CQ poll */
1589
1590         spin_lock_irqsave(&dev->flush_q_lock, flags);
1591         found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1592         if (found)
1593                 list_del(&qp->sq_entry);
1594         if (!qp->srq) {
1595                 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1596                 if (found)
1597                         list_del(&qp->rq_entry);
1598         }
1599         spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1600 }
1601
1602 int ocrdma_destroy_qp(struct ib_qp *ibqp)
1603 {
1604         int status;
1605         struct ocrdma_pd *pd;
1606         struct ocrdma_qp *qp;
1607         struct ocrdma_dev *dev;
1608         struct ib_qp_attr attrs;
1609         int attr_mask = IB_QP_STATE;
1610         unsigned long flags;
1611
1612         qp = get_ocrdma_qp(ibqp);
1613         dev = qp->dev;
1614
1615         attrs.qp_state = IB_QPS_ERR;
1616         pd = qp->pd;
1617
1618         /* change the QP state to ERROR */
1619         _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1620
        /* ensure that CQEs for a newly created QP (whose id may be the
         * same as that of the QP just being destroyed) don't get
         * discarded until the old CQEs are discarded.
         */
1625         mutex_lock(&dev->dev_lock);
1626         status = ocrdma_mbx_destroy_qp(dev, qp);
1627
1628         /*
1629          * acquire CQ lock while destroy is in progress, in order to
         * protect against processing in-flight CQEs for this QP.
1631          */
1632         spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
1633         if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1634                 spin_lock(&qp->rq_cq->cq_lock);
1635
1636         ocrdma_del_qpn_map(dev, qp);
1637
1638         if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1639                 spin_unlock(&qp->rq_cq->cq_lock);
1640         spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
1641
1642         if (!pd->uctx) {
1643                 ocrdma_discard_cqes(qp, qp->sq_cq);
1644                 ocrdma_discard_cqes(qp, qp->rq_cq);
1645         }
1646         mutex_unlock(&dev->dev_lock);
1647
1648         if (pd->uctx) {
1649                 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1650                                 PAGE_ALIGN(qp->sq.len));
1651                 if (!qp->srq)
1652                         ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1653                                         PAGE_ALIGN(qp->rq.len));
1654         }
1655
1656         ocrdma_del_flush_qp(qp);
1657
1658         kfree(qp->wqe_wr_id_tbl);
1659         kfree(qp->rqe_wr_id_tbl);
1660         kfree(qp);
1661         return status;
1662 }
1663
1664 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1665                                 struct ib_udata *udata)
1666 {
1667         int status;
1668         struct ocrdma_create_srq_uresp uresp;
1669
1670         memset(&uresp, 0, sizeof(uresp));
1671         uresp.rq_dbid = srq->rq.dbid;
1672         uresp.num_rq_pages = 1;
1673         uresp.rq_page_addr[0] = srq->rq.pa;
1674         uresp.rq_page_size = srq->rq.len;
1675         uresp.db_page_addr = dev->nic_info.unmapped_db +
1676             (srq->pd->id * dev->nic_info.db_page_size);
1677         uresp.db_page_size = dev->nic_info.db_page_size;
1678         uresp.num_rqe_allocated = srq->rq.max_cnt;
1679         if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1680                 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1681                 uresp.db_shift = 24;
1682         } else {
1683                 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1684                 uresp.db_shift = 16;
1685         }
1686
1687         status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1688         if (status)
1689                 return status;
        return ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
                               uresp.rq_page_size);
1695 }
1696
1697 struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1698                                  struct ib_srq_init_attr *init_attr,
1699                                  struct ib_udata *udata)
1700 {
1701         int status = -ENOMEM;
1702         struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1703         struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1704         struct ocrdma_srq *srq;
1705
1706         if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1707                 return ERR_PTR(-EINVAL);
1708         if (init_attr->attr.max_wr > dev->attr.max_rqe)
1709                 return ERR_PTR(-EINVAL);
1710
1711         srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1712         if (!srq)
1713                 return ERR_PTR(status);
1714
1715         spin_lock_init(&srq->q_lock);
1716         srq->pd = pd;
1717         srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
1718         status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
1719         if (status)
1720                 goto err;
1721
1722         if (udata == NULL) {
                srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
                                             GFP_KERNEL);
                if (srq->rqe_wr_id_tbl == NULL)
                        goto arm_err;

                srq->bit_fields_len = DIV_ROUND_UP(srq->rq.max_cnt, 32);
                srq->idx_bit_fields =
                    kmalloc_array(srq->bit_fields_len, sizeof(u32),
                                  GFP_KERNEL);
                if (srq->idx_bit_fields == NULL)
                        goto arm_err;
                /* all bits set: every SRQ index starts out free */
                memset(srq->idx_bit_fields, 0xff,
                       srq->bit_fields_len * sizeof(u32));
1736         }
1737
1738         if (init_attr->attr.srq_limit) {
1739                 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1740                 if (status)
1741                         goto arm_err;
1742         }
1743
1744         if (udata) {
1745                 status = ocrdma_copy_srq_uresp(dev, srq, udata);
1746                 if (status)
1747                         goto arm_err;
1748         }
1749
1750         return &srq->ibsrq;
1751
1752 arm_err:
1753         ocrdma_mbx_destroy_srq(dev, srq);
1754 err:
1755         kfree(srq->rqe_wr_id_tbl);
1756         kfree(srq->idx_bit_fields);
1757         kfree(srq);
1758         return ERR_PTR(status);
1759 }
1760
1761 int ocrdma_modify_srq(struct ib_srq *ibsrq,
1762                       struct ib_srq_attr *srq_attr,
1763                       enum ib_srq_attr_mask srq_attr_mask,
1764                       struct ib_udata *udata)
1765 {
1766         int status = 0;
1767         struct ocrdma_srq *srq;
1768
1769         srq = get_ocrdma_srq(ibsrq);
1770         if (srq_attr_mask & IB_SRQ_MAX_WR)
1771                 status = -EINVAL;
1772         else
1773                 status = ocrdma_mbx_modify_srq(srq, srq_attr);
1774         return status;
1775 }
1776
1777 int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1778 {
1779         int status;
1780         struct ocrdma_srq *srq;
1781
1782         srq = get_ocrdma_srq(ibsrq);
1783         status = ocrdma_mbx_query_srq(srq, srq_attr);
1784         return status;
1785 }
1786
1787 int ocrdma_destroy_srq(struct ib_srq *ibsrq)
1788 {
1789         int status;
1790         struct ocrdma_srq *srq;
1791         struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1792
1793         srq = get_ocrdma_srq(ibsrq);
1794
1795         status = ocrdma_mbx_destroy_srq(dev, srq);
1796
1797         if (srq->pd->uctx)
1798                 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1799                                 PAGE_ALIGN(srq->rq.len));
1800
1801         kfree(srq->idx_bit_fields);
1802         kfree(srq->rqe_wr_id_tbl);
1803         kfree(srq);
1804         return status;
1805 }
1806
1807 /* unprivileged verbs and their support functions. */
1808 static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1809                                 struct ocrdma_hdr_wqe *hdr,
1810                                 struct ib_send_wr *wr)
1811 {
1812         struct ocrdma_ewqe_ud_hdr *ud_hdr =
1813                 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1814         struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);
1815
1816         ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
1817         if (qp->qp_type == IB_QPT_GSI)
1818                 ud_hdr->qkey = qp->qkey;
1819         else
1820                 ud_hdr->qkey = wr->wr.ud.remote_qkey;
1821         ud_hdr->rsvd_ahid = ah->id;
1822 }
1823
1824 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1825                               struct ocrdma_sge *sge, int num_sge,
1826                               struct ib_sge *sg_list)
1827 {
1828         int i;
1829
1830         for (i = 0; i < num_sge; i++) {
1831                 sge[i].lrkey = sg_list[i].lkey;
1832                 sge[i].addr_lo = sg_list[i].addr;
1833                 sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1834                 sge[i].len = sg_list[i].length;
1835                 hdr->total_len += sg_list[i].length;
1836         }
1837         if (num_sge == 0)
1838                 memset(sge, 0, sizeof(*sge));
1839 }
1840
1841 static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
1842 {
1843         uint32_t total_len = 0, i;
1844
1845         for (i = 0; i < num_sge; i++)
1846                 total_len += sg_list[i].length;
1847         return total_len;
1848 }
1849
1850
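/* For inline sends the payload is copied directly into the WQE in place of
 * SGEs; the size field in 'cw' counts OCRDMA_WQE_STRIDE units, so the
 * inline length is first rounded up to OCRDMA_WQE_ALIGN_BYTES. As an
 * illustration (assuming 16-byte alignment), a 20-byte payload consumes
 * roundup(20, 16) = 32 bytes after the header.
 */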
1851 static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1852                                     struct ocrdma_hdr_wqe *hdr,
1853                                     struct ocrdma_sge *sge,
1854                                     struct ib_send_wr *wr, u32 wqe_size)
1855 {
1856         int i;
1857         char *dpp_addr;
1858
1859         if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
1860                 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
1861                 if (unlikely(hdr->total_len > qp->max_inline_data)) {
                        pr_err("%s() supported_len=0x%x,\n"
                               " unsupported len req=0x%x\n", __func__,
                               qp->max_inline_data, hdr->total_len);
1865                         return -EINVAL;
1866                 }
1867                 dpp_addr = (char *)sge;
1868                 for (i = 0; i < wr->num_sge; i++) {
1869                         memcpy(dpp_addr,
1870                                (void *)(unsigned long)wr->sg_list[i].addr,
1871                                wr->sg_list[i].length);
1872                         dpp_addr += wr->sg_list[i].length;
1873                 }
1874
1875                 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
                if (hdr->total_len == 0)
1877                         wqe_size += sizeof(struct ocrdma_sge);
1878                 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
1879         } else {
1880                 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1881                 if (wr->num_sge)
1882                         wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
1883                 else
1884                         wqe_size += sizeof(struct ocrdma_sge);
1885                 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1886         }
1887         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1888         return 0;
1889 }
1890
1891 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1892                              struct ib_send_wr *wr)
1893 {
1894         int status;
1895         struct ocrdma_sge *sge;
1896         u32 wqe_size = sizeof(*hdr);
1897
1898         if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
1899                 ocrdma_build_ud_hdr(qp, hdr, wr);
1900                 sge = (struct ocrdma_sge *)(hdr + 2);
1901                 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
1902         } else {
1903                 sge = (struct ocrdma_sge *)(hdr + 1);
1904         }
1905
1906         status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1907         return status;
1908 }
1909
1910 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1911                               struct ib_send_wr *wr)
1912 {
1913         int status;
1914         struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
1915         struct ocrdma_sge *sge = ext_rw + 1;
1916         u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
1917
1918         status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1919         if (status)
1920                 return status;
1921         ext_rw->addr_lo = wr->wr.rdma.remote_addr;
1922         ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
1923         ext_rw->lrkey = wr->wr.rdma.rkey;
1924         ext_rw->len = hdr->total_len;
1925         return 0;
1926 }
1927
1928 static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1929                               struct ib_send_wr *wr)
1930 {
1931         struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
1932         struct ocrdma_sge *sge = ext_rw + 1;
1933         u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
1934             sizeof(struct ocrdma_hdr_wqe);
1935
1936         ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1937         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1938         hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
1939         hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1940
1941         ext_rw->addr_lo = wr->wr.rdma.remote_addr;
1942         ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
1943         ext_rw->lrkey = wr->wr.rdma.rkey;
1944         ext_rw->len = hdr->total_len;
1945 }
1946
1947 static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
1948                             struct ocrdma_hw_mr *hwmr)
1949 {
1950         int i;
1951         u64 buf_addr = 0;
1952         int num_pbes;
1953         struct ocrdma_pbe *pbe;
1954
1955         pbe = (struct ocrdma_pbe *)pbl_tbl->va;
1956         num_pbes = 0;
1957
1958         /* go through the OS phy regions & fill hw pbe entries into pbls. */
1959         for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
                /* one OS buffer can need more than one pbe when
                 * buffers are of different sizes; split the ib_buf
                 * into one or more pbes.
                 */
1964                 buf_addr = wr->wr.fast_reg.page_list->page_list[i];
1965                 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
1966                 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
1967                 num_pbes += 1;
1968                 pbe++;
1969
                /* if the pbl is full of stored pbes, move to the next pbl. */
1973                 if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
1974                         pbl_tbl++;
1975                         pbe = (struct ocrdma_pbe *)pbl_tbl->va;
1976                 }
1977         }
1978         return;
1979 }
1980
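/* The device encodes the FRMR page size as a shift relative to 4K:
 * 4096 -> 0, 8192 -> 1, ..., 256M (4096 << 16) -> 16. Callers pass
 * 1 << page_shift, which is always an exact power of two in this range.
 */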
1981 static int get_encoded_page_size(int pg_sz)
1982 {
        int i;

        /* Max size is 256M: 4096 << 16 */
        for (i = 0; i < 17; i++)
1986                 if (pg_sz == (4096 << i))
1987                         break;
1988         return i;
1989 }
1990
1991
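/* The first-byte offset (fbo) computed below is the offset of iova_start
 * within the first page of the page list; e.g. (values illustrative) with
 * iova_start = 0x10234 and page_list[0] = 0x10000, fbo = 0x234.
 */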
1992 static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1993                            struct ib_send_wr *wr)
1994 {
1995         u64 fbo;
1996         struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
1997         struct ocrdma_mr *mr;
1998         u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
1999
2000         wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2001
2002         if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr)
2003                 return -EINVAL;
2004
2005         hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
2006         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2007
2008         if (wr->wr.fast_reg.page_list_len == 0)
2009                 BUG();
2010         if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
2011                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
2012         if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
2013                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
2014         if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
2015                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
2016         hdr->lkey = wr->wr.fast_reg.rkey;
2017         hdr->total_len = wr->wr.fast_reg.length;
2018
2019         fbo = wr->wr.fast_reg.iova_start -
2020             (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
2021
2022         fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
2023         fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
2024         fast_reg->fbo_hi = upper_32_bits(fbo);
2025         fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
2026         fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
2027         fast_reg->size_sge =
2028                 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
2029         mr = (struct ocrdma_mr *) (unsigned long)
2030                 qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
2031         build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
2032         return 0;
2033 }
2034
2035 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
2036 {
2037         u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
2038
2039         iowrite32(val, qp->sq_db);
2040 }
2041
2042 int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2043                      struct ib_send_wr **bad_wr)
2044 {
2045         int status = 0;
2046         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2047         struct ocrdma_hdr_wqe *hdr;
2048         unsigned long flags;
2049
2050         spin_lock_irqsave(&qp->q_lock, flags);
2051         if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
2052                 spin_unlock_irqrestore(&qp->q_lock, flags);
2053                 *bad_wr = wr;
2054                 return -EINVAL;
2055         }
2056
2057         while (wr) {
2058                 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
2059                     wr->num_sge > qp->sq.max_sges) {
2060                         *bad_wr = wr;
2061                         status = -ENOMEM;
2062                         break;
2063                 }
2064                 hdr = ocrdma_hwq_head(&qp->sq);
2065                 hdr->cw = 0;
2066                 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2067                         hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2068                 if (wr->send_flags & IB_SEND_FENCE)
2069                         hdr->cw |=
2070                             (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
2071                 if (wr->send_flags & IB_SEND_SOLICITED)
2072                         hdr->cw |=
2073                             (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2074                 hdr->total_len = 0;
2075                 switch (wr->opcode) {
2076                 case IB_WR_SEND_WITH_IMM:
2077                         hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2078                         hdr->immdt = ntohl(wr->ex.imm_data);
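                        /* fall through: SEND_WITH_IMM also builds a SEND */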
2079                 case IB_WR_SEND:
2080                         hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
                        status = ocrdma_build_send(qp, hdr, wr);
2082                         break;
2083                 case IB_WR_SEND_WITH_INV:
2084                         hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2085                         hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2086                         hdr->lkey = wr->ex.invalidate_rkey;
2087                         status = ocrdma_build_send(qp, hdr, wr);
2088                         break;
2089                 case IB_WR_RDMA_WRITE_WITH_IMM:
2090                         hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2091                         hdr->immdt = ntohl(wr->ex.imm_data);
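                        /* fall through: WRITE_WITH_IMM also builds a WRITE */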
2092                 case IB_WR_RDMA_WRITE:
2093                         hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2094                         status = ocrdma_build_write(qp, hdr, wr);
2095                         break;
2096                 case IB_WR_RDMA_READ_WITH_INV:
2097                         hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
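                        /* fall through: READ_WITH_INV also builds a READ */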
2098                 case IB_WR_RDMA_READ:
2099                         ocrdma_build_read(qp, hdr, wr);
2100                         break;
2101                 case IB_WR_LOCAL_INV:
2102                         hdr->cw |=
2103                             (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
2104                         hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
2105                                         sizeof(struct ocrdma_sge)) /
2106                                 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2107                         hdr->lkey = wr->ex.invalidate_rkey;
2108                         break;
2109                 case IB_WR_FAST_REG_MR:
2110                         status = ocrdma_build_fr(qp, hdr, wr);
2111                         break;
2112                 default:
2113                         status = -EINVAL;
2114                         break;
2115                 }
2116                 if (status) {
2117                         *bad_wr = wr;
2118                         break;
2119                 }
2120                 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2121                         qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2122                 else
2123                         qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2124                 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2125                 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2126                                    OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2127                 /* make sure wqe is written before adapter can access it */
2128                 wmb();
2129                 /* inform hw to start processing it */
2130                 ocrdma_ring_sq_db(qp);
2131
2132                 /* update pointer, counter for next wr */
2133                 ocrdma_hwq_inc_head(&qp->sq);
2134                 wr = wr->next;
2135         }
2136         spin_unlock_irqrestore(&qp->q_lock, flags);
2137         return status;
2138 }
2139
2140 static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2141 {
2142         u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
2143
2144         iowrite32(val, qp->rq_db);
2145 }
2146
2147 static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
2148                              u16 tag)
2149 {
2150         u32 wqe_size = 0;
2151         struct ocrdma_sge *sge;
2152         if (wr->num_sge)
2153                 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2154         else
2155                 wqe_size = sizeof(*sge) + sizeof(*rqe);
2156
2157         rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
2158                                 OCRDMA_WQE_SIZE_SHIFT);
2159         rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2160         rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2161         rqe->total_len = 0;
2162         rqe->rsvd_tag = tag;
2163         sge = (struct ocrdma_sge *)(rqe + 1);
2164         ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2165         ocrdma_cpu_to_le32(rqe, wqe_size);
2166 }
2167
2168 int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2169                      struct ib_recv_wr **bad_wr)
2170 {
2171         int status = 0;
2172         unsigned long flags;
2173         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2174         struct ocrdma_hdr_wqe *rqe;
2175
2176         spin_lock_irqsave(&qp->q_lock, flags);
2177         if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
2178                 spin_unlock_irqrestore(&qp->q_lock, flags);
2179                 *bad_wr = wr;
2180                 return -EINVAL;
2181         }
2182         while (wr) {
2183                 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
2184                     wr->num_sge > qp->rq.max_sges) {
2185                         *bad_wr = wr;
2186                         status = -ENOMEM;
2187                         break;
2188                 }
2189                 rqe = ocrdma_hwq_head(&qp->rq);
2190                 ocrdma_build_rqe(rqe, wr, 0);
2191
2192                 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
2193                 /* make sure rqe is written before adapter can access it */
2194                 wmb();
2195
2196                 /* inform hw to start processing it */
2197                 ocrdma_ring_rq_db(qp);
2198
2199                 /* update pointer, counter for next wr */
2200                 ocrdma_hwq_inc_head(&qp->rq);
2201                 wr = wr->next;
2202         }
2203         spin_unlock_irqrestore(&qp->q_lock, flags);
2204         return status;
2205 }
2206
/* A cqe for an srq's rqe can potentially arrive out of order.
 * The index gives the entry in the shadow table where the wr_id
 * is stored; the tag/index is returned in the cqe to reference
 * back to the rqe it belongs to.
 */
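/* ffs() returns the 1-based position of the lowest set bit, so a set bit
 * at row 2, bit 4 yields indx = 2 * 32 + (5 - 1) = 68; the bit is then
 * toggled (cleared) to mark the index busy. The returned tag is indx + 1
 * so that a zero buftag can be treated as invalid (see the wqe_idx < 1
 * checks above).
 */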
2212 static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2213 {
2214         int row = 0;
2215         int indx = 0;
2216
2217         for (row = 0; row < srq->bit_fields_len; row++) {
2218                 if (srq->idx_bit_fields[row]) {
2219                         indx = ffs(srq->idx_bit_fields[row]);
2220                         indx = (row * 32) + (indx - 1);
2221                         if (indx >= srq->rq.max_cnt)
2222                                 BUG();
2223                         ocrdma_srq_toggle_bit(srq, indx);
2224                         break;
2225                 }
2226         }
2227
2228         if (row == srq->bit_fields_len)
2229                 BUG();
2230         return indx + 1; /* Use from index 1 */
2231 }
2232
2233 static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2234 {
2235         u32 val = srq->rq.dbid | (1 << 16);
2236
2237         iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2238 }
2239
2240 int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
2241                          struct ib_recv_wr **bad_wr)
2242 {
2243         int status = 0;
2244         unsigned long flags;
2245         struct ocrdma_srq *srq;
2246         struct ocrdma_hdr_wqe *rqe;
2247         u16 tag;
2248
2249         srq = get_ocrdma_srq(ibsrq);
2250
2251         spin_lock_irqsave(&srq->q_lock, flags);
2252         while (wr) {
2253                 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2254                     wr->num_sge > srq->rq.max_sges) {
2255                         status = -ENOMEM;
2256                         *bad_wr = wr;
2257                         break;
2258                 }
2259                 tag = ocrdma_srq_get_idx(srq);
2260                 rqe = ocrdma_hwq_head(&srq->rq);
2261                 ocrdma_build_rqe(rqe, wr, tag);
2262
2263                 srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2264                 /* make sure rqe is written before adapter can perform DMA */
2265                 wmb();
2266                 /* inform hw to start processing it */
2267                 ocrdma_ring_srq_db(srq);
2268                 /* update pointer, counter for next wr */
2269                 ocrdma_hwq_inc_head(&srq->rq);
2270                 wr = wr->next;
2271         }
2272         spin_unlock_irqrestore(&srq->q_lock, flags);
2273         return status;
2274 }
2275
2276 static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2277 {
2278         enum ib_wc_status ibwc_status;
2279
2280         switch (status) {
2281         case OCRDMA_CQE_GENERAL_ERR:
2282                 ibwc_status = IB_WC_GENERAL_ERR;
2283                 break;
2284         case OCRDMA_CQE_LOC_LEN_ERR:
2285                 ibwc_status = IB_WC_LOC_LEN_ERR;
2286                 break;
2287         case OCRDMA_CQE_LOC_QP_OP_ERR:
2288                 ibwc_status = IB_WC_LOC_QP_OP_ERR;
2289                 break;
2290         case OCRDMA_CQE_LOC_EEC_OP_ERR:
2291                 ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2292                 break;
2293         case OCRDMA_CQE_LOC_PROT_ERR:
2294                 ibwc_status = IB_WC_LOC_PROT_ERR;
2295                 break;
2296         case OCRDMA_CQE_WR_FLUSH_ERR:
2297                 ibwc_status = IB_WC_WR_FLUSH_ERR;
2298                 break;
2299         case OCRDMA_CQE_MW_BIND_ERR:
2300                 ibwc_status = IB_WC_MW_BIND_ERR;
2301                 break;
2302         case OCRDMA_CQE_BAD_RESP_ERR:
2303                 ibwc_status = IB_WC_BAD_RESP_ERR;
2304                 break;
2305         case OCRDMA_CQE_LOC_ACCESS_ERR:
2306                 ibwc_status = IB_WC_LOC_ACCESS_ERR;
2307                 break;
2308         case OCRDMA_CQE_REM_INV_REQ_ERR:
2309                 ibwc_status = IB_WC_REM_INV_REQ_ERR;
2310                 break;
2311         case OCRDMA_CQE_REM_ACCESS_ERR:
2312                 ibwc_status = IB_WC_REM_ACCESS_ERR;
2313                 break;
2314         case OCRDMA_CQE_REM_OP_ERR:
2315                 ibwc_status = IB_WC_REM_OP_ERR;
2316                 break;
2317         case OCRDMA_CQE_RETRY_EXC_ERR:
2318                 ibwc_status = IB_WC_RETRY_EXC_ERR;
2319                 break;
2320         case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2321                 ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2322                 break;
2323         case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2324                 ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2325                 break;
2326         case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2327                 ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2328                 break;
2329         case OCRDMA_CQE_REM_ABORT_ERR:
2330                 ibwc_status = IB_WC_REM_ABORT_ERR;
2331                 break;
2332         case OCRDMA_CQE_INV_EECN_ERR:
2333                 ibwc_status = IB_WC_INV_EECN_ERR;
2334                 break;
2335         case OCRDMA_CQE_INV_EEC_STATE_ERR:
2336                 ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2337                 break;
2338         case OCRDMA_CQE_FATAL_ERR:
2339                 ibwc_status = IB_WC_FATAL_ERR;
2340                 break;
2341         case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2342                 ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2343                 break;
2344         default:
2345                 ibwc_status = IB_WC_GENERAL_ERR;
2346                 break;
2347         }
2348         return ibwc_status;
2349 }
2350
2351 static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2352                       u32 wqe_idx)
2353 {
2354         struct ocrdma_hdr_wqe *hdr;
2355         struct ocrdma_sge *rw;
2356         int opcode;
2357
2358         hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2359
2360         ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2361         /* Undo the hdr->cw swap */
2362         opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2363         switch (opcode) {
2364         case OCRDMA_WRITE:
2365                 ibwc->opcode = IB_WC_RDMA_WRITE;
2366                 break;
2367         case OCRDMA_READ:
2368                 rw = (struct ocrdma_sge *)(hdr + 1);
2369                 ibwc->opcode = IB_WC_RDMA_READ;
2370                 ibwc->byte_len = rw->len;
2371                 break;
2372         case OCRDMA_SEND:
2373                 ibwc->opcode = IB_WC_SEND;
2374                 break;
2375         case OCRDMA_FR_MR:
2376                 ibwc->opcode = IB_WC_FAST_REG_MR;
2377                 break;
2378         case OCRDMA_LKEY_INV:
2379                 ibwc->opcode = IB_WC_LOCAL_INV;
2380                 break;
2381         default:
2382                 ibwc->status = IB_WC_GENERAL_ERR;
2383                 pr_err("%s() invalid opcode received = 0x%x\n",
2384                        __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
2385                 break;
2386         }
2387 }
2388
2389 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2390                                                 struct ocrdma_cqe *cqe)
2391 {
2392         if (is_cqe_for_sq(cqe)) {
2393                 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2394                                 cqe->flags_status_srcqpn) &
2395                                         ~OCRDMA_CQE_STATUS_MASK);
2396                 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2397                                 cqe->flags_status_srcqpn) |
2398                                 (OCRDMA_CQE_WR_FLUSH_ERR <<
2399                                         OCRDMA_CQE_STATUS_SHIFT));
2400         } else {
2401                 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2402                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2403                                         cqe->flags_status_srcqpn) &
2404                                                 ~OCRDMA_CQE_UD_STATUS_MASK);
2405                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2406                                         cqe->flags_status_srcqpn) |
2407                                         (OCRDMA_CQE_WR_FLUSH_ERR <<
2408                                                 OCRDMA_CQE_UD_STATUS_SHIFT));
2409                 } else {
2410                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2411                                         cqe->flags_status_srcqpn) &
2412                                                 ~OCRDMA_CQE_STATUS_MASK);
2413                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2414                                         cqe->flags_status_srcqpn) |
2415                                         (OCRDMA_CQE_WR_FLUSH_ERR <<
2416                                                 OCRDMA_CQE_STATUS_SHIFT));
2417                 }
2418         }
2419 }
2420
2421 static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2422                                   struct ocrdma_qp *qp, int status)
2423 {
2424         bool expand = false;
2425
2426         ibwc->byte_len = 0;
2427         ibwc->qp = &qp->ibqp;
2428         ibwc->status = ocrdma_to_ibwc_err(status);
2429
2430         ocrdma_flush_qp(qp);
2431         ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
2432
        /* if a wqe/rqe is pending for which a cqe needs to be returned,
         * trigger expanding it.
         */
2436         if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2437                 expand = true;
2438                 ocrdma_set_cqe_status_flushed(qp, cqe);
2439         }
2440         return expand;
2441 }
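/* "expand" here means: the adapter delivered one error cqe for the QP, but
 * more flushed work requests are still pending, so poll_cq keeps re-reading
 * the same hw cqe (rewritten as WR_FLUSH_ERR above) until both queues drain.
 */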
2442
static bool ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2444                                   struct ocrdma_qp *qp, int status)
2445 {
2446         ibwc->opcode = IB_WC_RECV;
2447         ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2448         ocrdma_hwq_inc_tail(&qp->rq);
2449
2450         return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2451 }
2452
static bool ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2454                                   struct ocrdma_qp *qp, int status)
2455 {
2456         ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2457         ocrdma_hwq_inc_tail(&qp->sq);
2458
2459         return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2460 }
2461
2462
2463 static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2464                                  struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2465                                  bool *polled, bool *stop)
2466 {
2467         bool expand;
2468         int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2469                 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2470
        /* when the hw SQ is empty but the RQ is not, keep the cqe
         * so that the cq event is raised again.
         */
2474         if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
2475                 /* when cq for rq and sq is same, it is safe to return
2476                  * flush cqe for RQEs.
2477                  */
2478                 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2479                         *polled = true;
2480                         status = OCRDMA_CQE_WR_FLUSH_ERR;
2481                         expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2482                 } else {
2483                         /* stop processing further cqe as this cqe is used for
2484                          * triggering cq event on buddy cq of RQ.
2485                          * When QP is destroyed, this cqe will be removed
2486                          * from the cq's hardware q.
2487                          */
2488                         *polled = false;
2489                         *stop = true;
2490                         expand = false;
2491                 }
2492         } else {
2493                 *polled = true;
2494                 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2495         }
2496         return expand;
2497 }
2498
2499 static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2500                                      struct ocrdma_cqe *cqe,
2501                                      struct ib_wc *ibwc, bool *polled)
2502 {
2503         bool expand = false;
2504         int tail = qp->sq.tail;
2505         u32 wqe_idx;
2506
2507         if (!qp->wqe_wr_id_tbl[tail].signaled) {
2508                 *polled = false;    /* WC cannot be consumed yet */
2509         } else {
2510                 ibwc->status = IB_WC_SUCCESS;
2511                 ibwc->wc_flags = 0;
2512                 ibwc->qp = &qp->ibqp;
2513                 ocrdma_update_wc(qp, ibwc, tail);
2514                 *polled = true;
2515         }
2516         wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
2517                         OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
2518         if (tail != wqe_idx)
2519                 expand = true; /* Coalesced CQE can't be consumed yet */
2520
2521         ocrdma_hwq_inc_tail(&qp->sq);
2522         return expand;
2523 }
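/* A single success cqe can acknowledge several WQEs at once: if tail = 3
 * and the cqe's wqe_idx = 5, completions for entries 3, 4 and 5 are
 * reported one poll iteration at a time via the expand path.
 */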
2524
2525 static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2526                              struct ib_wc *ibwc, bool *polled, bool *stop)
2527 {
2528         int status;
2529         bool expand;
2530
2531         status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2532                 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2533
2534         if (status == OCRDMA_CQE_SUCCESS)
2535                 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2536         else
2537                 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2538         return expand;
2539 }
2540
2541 static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
2542 {
2543         int status;
2544
2545         status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2546                 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2547         ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2548                                                 OCRDMA_CQE_SRCQP_MASK;
2549         ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
2550                                                 OCRDMA_CQE_PKEY_MASK;
2551         ibwc->wc_flags = IB_WC_GRH;
2552         ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2553                                         OCRDMA_CQE_UD_XFER_LEN_SHIFT);
2554         return status;
2555 }
2556
2557 static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2558                                        struct ocrdma_cqe *cqe,
2559                                        struct ocrdma_qp *qp)
2560 {
2561         unsigned long flags;
2562         struct ocrdma_srq *srq;
2563         u32 wqe_idx;
2564
2565         srq = get_ocrdma_srq(qp->ibqp.srq);
2566         wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
2567                 OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
2568         if (wqe_idx < 1)
2569                 BUG();
2570
2571         ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2572         spin_lock_irqsave(&srq->q_lock, flags);
2573         ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
2574         spin_unlock_irqrestore(&srq->q_lock, flags);
2575         ocrdma_hwq_inc_tail(&srq->rq);
2576 }
2577
2578 static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2579                                 struct ib_wc *ibwc, bool *polled, bool *stop,
2580                                 int status)
2581 {
2582         bool expand;
2583
        /* when the hw RQ is empty but the SQ is not, keep the cqe
         * so that the cq event is raised again.
         */
2587         if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2588                 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2589                         *polled = true;
2590                         status = OCRDMA_CQE_WR_FLUSH_ERR;
2591                         expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2592                 } else {
2593                         *polled = false;
2594                         *stop = true;
2595                         expand = false;
2596                 }
2597         } else {
2598                 *polled = true;
2599                 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2600         }
2601         return expand;
2602 }
2603
2604 static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2605                                      struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2606 {
2607         ibwc->opcode = IB_WC_RECV;
2608         ibwc->qp = &qp->ibqp;
2609         ibwc->status = IB_WC_SUCCESS;
2610
2611         if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
2612                 ocrdma_update_ud_rcqe(ibwc, cqe);
2613         else
2614                 ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2615
2616         if (is_cqe_imm(cqe)) {
2617                 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2618                 ibwc->wc_flags |= IB_WC_WITH_IMM;
2619         } else if (is_cqe_wr_imm(cqe)) {
2620                 ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2621                 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2622                 ibwc->wc_flags |= IB_WC_WITH_IMM;
2623         } else if (is_cqe_invalidated(cqe)) {
2624                 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2625                 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2626         }
2627         if (qp->ibqp.srq) {
2628                 ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
2629         } else {
2630                 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2631                 ocrdma_hwq_inc_tail(&qp->rq);
2632         }
2633 }
2634
2635 static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2636                              struct ib_wc *ibwc, bool *polled, bool *stop)
2637 {
2638         int status;
2639         bool expand = false;
2640
2641         ibwc->wc_flags = 0;
2642         if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2643                 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2644                                         OCRDMA_CQE_UD_STATUS_MASK) >>
2645                                         OCRDMA_CQE_UD_STATUS_SHIFT;
2646         } else {
2647                 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2648                              OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2649         }
2650
2651         if (status == OCRDMA_CQE_SUCCESS) {
2652                 *polled = true;
2653                 ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2654         } else {
2655                 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2656                                               status);
2657         }
2658         return expand;
2659 }
2660
2661 static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
2662                                    u16 cur_getp)
2663 {
2664         if (cq->phase_change) {
2665                 if (cur_getp == 0)
2666                         cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
2667         } else {
2668                 /* clear valid bit */
2669                 cqe->flags_status_srcqpn = 0;
2670         }
2671 }
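/* Two schemes for detecting new cqes: phase-change hardware flips the
 * expected-valid sense each time the ring wraps (getp back to 0), while
 * older hardware requires the driver to clear the valid bit of each
 * consumed cqe.
 */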
2672
2673 static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
2674                             struct ib_wc *ibwc)
2675 {
2676         u16 qpn = 0;
2677         int i = 0;
2678         bool expand = false;
2679         int polled_hw_cqes = 0;
2680         struct ocrdma_qp *qp = NULL;
2681         struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
2682         struct ocrdma_cqe *cqe;
        u16 cur_getp;
        bool polled = false;
        bool stop = false;
2684
2685         cur_getp = cq->getp;
2686         while (num_entries) {
2687                 cqe = cq->va + cur_getp;
                /* check whether the cqe is valid */
2689                 if (!is_cqe_valid(cq, cqe))
2690                         break;
2691                 qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
2692                 /* ignore discarded cqe */
2693                 if (qpn == 0)
2694                         goto skip_cqe;
2695                 qp = dev->qp_tbl[qpn];
2696                 BUG_ON(qp == NULL);
2697
2698                 if (is_cqe_for_sq(cqe)) {
2699                         expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
2700                                                   &stop);
2701                 } else {
2702                         expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
2703                                                   &stop);
2704                 }
2705                 if (expand)
2706                         goto expand_cqe;
2707                 if (stop)
2708                         goto stop_cqe;
2709                 /* clear qpn to avoid duplicate processing by discard_cqe() */
2710                 cqe->cmn.qpn = 0;
2711 skip_cqe:
2712                 polled_hw_cqes += 1;
2713                 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
2714                 ocrdma_change_cq_phase(cq, cqe, cur_getp);
2715 expand_cqe:
2716                 if (polled) {
2717                         num_entries -= 1;
2718                         i += 1;
2719                         ibwc = ibwc + 1;
2720                         polled = false;
2721                 }
2722         }
2723 stop_cqe:
2724         cq->getp = cur_getp;
2725         if (cq->deferred_arm) {
2726                 ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol,
2727                                   polled_hw_cqes);
2728                 cq->deferred_arm = false;
2729                 cq->deferred_sol = false;
2730         } else {
2731                 /* We need to pop the CQE. No need to arm */
2732                 ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol,
2733                                   polled_hw_cqes);
2734                 cq->deferred_sol = false;
2735         }
2736
2737         return i;
2738 }
2739
2740 /* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
2741 static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
2742                               struct ocrdma_qp *qp, struct ib_wc *ibwc)
2743 {
2744         int err_cqes = 0;
2745
2746         while (num_entries) {
2747                 if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
2748                         break;
2749                 if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
2750                         ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2751                         ocrdma_hwq_inc_tail(&qp->sq);
2752                 } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
2753                         ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2754                         ocrdma_hwq_inc_tail(&qp->rq);
2755                 } else {
2756                         return err_cqes;
2757                 }
2758                 ibwc->byte_len = 0;
2759                 ibwc->status = IB_WC_WR_FLUSH_ERR;
2760                 ibwc = ibwc + 1;
2761                 err_cqes += 1;
2762                 num_entries -= 1;
2763         }
2764         return err_cqes;
2765 }
2766
2767 int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2768 {
2769         int cqes_to_poll = num_entries;
2770         struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2771         struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
2772         int num_os_cqe = 0, err_cqes = 0;
2773         struct ocrdma_qp *qp;
2774         unsigned long flags;
2775
2776         /* poll cqes from adapter CQ */
2777         spin_lock_irqsave(&cq->cq_lock, flags);
2778         num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
2779         spin_unlock_irqrestore(&cq->cq_lock, flags);
2780         cqes_to_poll -= num_os_cqe;
2781
2782         if (cqes_to_poll) {
2783                 wc = wc + num_os_cqe;
                /* the adapter returns a single error cqe when a qp moves to
                 * the error state, so insert error cqes with wc_status
                 * FLUSHED for the pending WQEs and RQEs of that QP's SQ
                 * and RQ which use this CQ.
                 */
2789                 spin_lock_irqsave(&dev->flush_q_lock, flags);
2790                 list_for_each_entry(qp, &cq->sq_head, sq_entry) {
2791                         if (cqes_to_poll == 0)
2792                                 break;
2793                         err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
2794                         cqes_to_poll -= err_cqes;
2795                         num_os_cqe += err_cqes;
2796                         wc = wc + err_cqes;
2797                 }
2798                 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
2799         }
2800         return num_os_cqe;
2801 }
2802
2803 int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
2804 {
2805         struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
2806         struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
2807         u16 cq_id;
2808         unsigned long flags;
2809         bool arm_needed = false, sol_needed = false;
2810
2811         cq_id = cq->id;
2812
2813         spin_lock_irqsave(&cq->cq_lock, flags);
2814         if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
2815                 arm_needed = true;
2816         if (cq_flags & IB_CQ_SOLICITED)
2817                 sol_needed = true;
2818
2819         if (cq->first_arm) {
2820                 ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
2821                 cq->first_arm = false;
2822                 goto skip_defer;
2823         }
2824         cq->deferred_arm = true;
2825
2826 skip_defer:
2827         cq->deferred_sol = sol_needed;
2828         spin_unlock_irqrestore(&cq->cq_lock, flags);
2829
2830         return 0;
2831 }
2832
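/*
 * Example (illustrative sketch, not part of the driver): consumers re-arm
 * through ib_req_notify_cq(), which lands in ocrdma_arm_cq() above, and
 * then poll once more so a completion that raced the arm is not missed.
 * Drivers that do not report missed events (this one returns 0) simply
 * exit the loop after the first re-arm.
 */
static void example_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct ib_wc wc;

        do {
                while (ib_poll_cq(cq, 1, &wc) > 0)
                        ;       /* process the work completion here */
        } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
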
2833 struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
2834 {
2835         int status;
2836         struct ocrdma_mr *mr;
2837         struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
2838         struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
2839
2840         if (max_page_list_len > dev->attr.max_pages_per_frmr)
2841                 return ERR_PTR(-EINVAL);
2842
2843         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2844         if (!mr)
2845                 return ERR_PTR(-ENOMEM);
2846
2847         status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
2848         if (status)
2849                 goto pbl_err;
2850         mr->hwmr.fr_mr = 1;
2851         mr->hwmr.remote_rd = 0;
2852         mr->hwmr.remote_wr = 0;
2853         mr->hwmr.local_rd = 0;
2854         mr->hwmr.local_wr = 0;
2855         mr->hwmr.mw_bind = 0;
2856         status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
2857         if (status)
2858                 goto pbl_err;
2859         status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
2860         if (status)
2861                 goto mbx_err;
2862         mr->ibmr.rkey = mr->hwmr.lkey;
2863         mr->ibmr.lkey = mr->hwmr.lkey;
2864         dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
2865                 (unsigned long) mr;
2866         return &mr->ibmr;
2867 mbx_err:
2868         ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
2869 pbl_err:
2870         kfree(mr);
2871         return ERR_PTR(status);
2872 }
2873
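/*
 * Example (illustrative sketch, not part of the driver): a ULP reaches the
 * two allocators above and below through the core verbs wrappers. The
 * function name and the page count of 32 are illustrative assumptions.
 */
static int example_setup_frmr(struct ib_pd *pd)
{
        struct ib_mr *mr;
        struct ib_fast_reg_page_list *pl;

        mr = ib_alloc_fast_reg_mr(pd, 32);
        if (IS_ERR(mr))
                return PTR_ERR(mr);

        pl = ib_alloc_fast_reg_page_list(pd->device, 32);
        if (IS_ERR(pl)) {
                ib_dereg_mr(mr);
                return PTR_ERR(pl);
        }
        /* fill pl->page_list[] and post an IB_WR_FAST_REG_MR WR ... */
        return 0;
}
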
2874 struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
2875                                                           *ibdev,
2876                                                           int page_list_len)
2877 {
2878         struct ib_fast_reg_page_list *frmr_list;
2879         int size;
2880
2881         size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
2882         frmr_list = kzalloc(size, GFP_KERNEL);
2883         if (!frmr_list)
2884                 return ERR_PTR(-ENOMEM);
2885         frmr_list->page_list = (u64 *)(frmr_list + 1);
2886         return frmr_list;
2887 }
2888
2889 void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
2890 {
2891         kfree(page_list);
2892 }
2893
2894 #define MAX_KERNEL_PBE_SIZE 65536
2895 static inline int count_kernel_pbes(struct ib_phys_buf *buf_list,
2896                                     int buf_cnt, u32 *pbe_size)
2897 {
2898         u64 total_size = 0;
2899         u64 buf_size = 0;
2900         int i;
2901         *pbe_size = roundup(buf_list[0].size, PAGE_SIZE);
2902         *pbe_size = roundup_pow_of_two(*pbe_size);
2903
2904         /* find the smallest PBE size that we can have */
2905         for (i = 0; i < buf_cnt; i++) {
2906                 /* the first addr need not be page aligned; skip its alignment check */
2907                 if ((i != 0) && ((buf_list[i].addr & ~PAGE_MASK) ||
2908                                  (buf_list[i].size & ~PAGE_MASK))) {
2909                         return 0;
2910                 }
2911
2912                 /* if the candidate PBE size is greater than this
2913                  * buffer's rounded size, reduce the PBE size.
2914                  */
2915                 buf_size = roundup(buf_list[i].size, PAGE_SIZE);
2916                 /* pbe_size must be a power-of-two multiple of 4K: 1x, 2x, 4x, 8x... */
2917                 buf_size = roundup_pow_of_two(buf_size);
2918                 if (*pbe_size > buf_size)
2919                         *pbe_size = buf_size;
2920
2921                 total_size += buf_size;
2922         }
2923         *pbe_size = min_t(u32, *pbe_size, MAX_KERNEL_PBE_SIZE);
2925
2926         /* num_pbes = total_size / *pbe_size; done as a shift since *pbe_size is a power of two */
2927
2928         return total_size >> ilog2(*pbe_size);
2929 }
2930
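/*
 * Worked example (assuming PAGE_SIZE == 4K): for two page-aligned buffers
 * of 8K and 16K, the loop settles on pbe_size = 8K (the smaller of the two
 * power-of-two-rounded sizes) and total_size = 24K, so the function
 * returns 24K >> ilog2(8K) = 3 PBEs.
 */
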
2931 static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt,
2932                               u32 pbe_size, struct ocrdma_pbl *pbl_tbl,
2933                               struct ocrdma_hw_mr *hwmr)
2934 {
2935         int i;
2936         int idx;
2937         int pbes_per_buf = 0;
2938         u64 buf_addr = 0;
2939         int num_pbes;
2940         struct ocrdma_pbe *pbe;
2941         int total_num_pbes = 0;
2942
2943         if (!hwmr->num_pbes)
2944                 return;
2945
2946         pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2947         num_pbes = 0;
2948
2949         /* walk the OS physical buffer regions and fill HW PBE entries into the PBLs */
2950         for (i = 0; i < ib_buf_cnt; i++) {
2951                 buf_addr = buf_list[i].addr;
2952                 pbes_per_buf =
2953                     roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) /
2954                     pbe_size;
2955                 hwmr->len += buf_list[i].size;
2956                 /* one OS buffer can span several PBEs when the
2957                  * buffers differ in size, so split each ib_buf
2958                  * across one or more PBEs.
2959                  */
2960                 for (idx = 0; idx < pbes_per_buf; idx++) {
2961                         /* always program page-aligned addresses; any
2962                          * offset into the first page is covered by fbo.
2963                          */
2964                         if (i == 0) {
2965                                 /* for a non-zero fbo, program the
2966                                  * start of the page.
2967                                  */
2968                                 pbe->pa_lo =
2969                                     cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2970                                 pbe->pa_hi =
2971                                     cpu_to_le32((u32) upper_32_bits(buf_addr));
2972                         } else {
2973                                 pbe->pa_lo =
2974                                     cpu_to_le32((u32) (buf_addr & 0xffffffff));
2975                                 pbe->pa_hi =
2976                                     cpu_to_le32((u32) upper_32_bits(buf_addr));
2977                         }
2978                         buf_addr += pbe_size;
2979                         num_pbes += 1;
2980                         total_num_pbes += 1;
2981                         pbe++;
2982
2983                         if (total_num_pbes == hwmr->num_pbes)
2984                                 goto mr_tbl_done;
2985                         /* if this pbl is full, move on to
2986                          * the next pbl.
2987                          */
2988                         if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
2989                                 pbl_tbl++;
2990                                 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2991                                 num_pbes = 0;
2992                         }
2993                 }
2994         }
2995 mr_tbl_done:
2996         return;
2997 }
2998
2999 struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd,
3000                                    struct ib_phys_buf *buf_list,
3001                                    int buf_cnt, int acc, u64 *iova_start)
3002 {
3003         int status = -ENOMEM;
3004         struct ocrdma_mr *mr;
3005         struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
3006         struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
3007         u32 num_pbes;
3008         u32 pbe_size = 0;
3009
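        /* per the IB spec, remote write access also requires local write */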
3010         if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE))
3011                 return ERR_PTR(-EINVAL);
3012
3013         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3014         if (!mr)
3015                 return ERR_PTR(status);
3016
3017         num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size);
3018         if (num_pbes == 0) {
3019                 status = -EINVAL;
3020                 goto pbl_err;
3021         }
3022         status = ocrdma_get_pbl_info(dev, mr, num_pbes);
3023         if (status)
3024                 goto pbl_err;
3025
3026         mr->hwmr.pbe_size = pbe_size;
3027         mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK);
3028         mr->hwmr.va = *iova_start;
3029         mr->hwmr.local_rd = 1;
3030         mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3031         mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3032         mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3033         mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3034         mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
3035
3036         status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
3037         if (status)
3038                 goto pbl_err;
3039         build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table,
3040                           &mr->hwmr);
3041         status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
3042         if (status)
3043                 goto mbx_err;
3044
3045         mr->ibmr.lkey = mr->hwmr.lkey;
3046         if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
3047                 mr->ibmr.rkey = mr->hwmr.lkey;
3048         return &mr->ibmr;
3049
3050 mbx_err:
3051         ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
3052 pbl_err:
3053         kfree(mr);
3054         return ERR_PTR(status);
3055 }
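
/*
 * Example (illustrative sketch, not part of the driver): kernel consumers
 * reach ocrdma_reg_kernel_mr() above through ib_reg_phys_mr(). The
 * function name and the access flags chosen are illustrative assumptions.
 */
static struct ib_mr *example_reg_phys(struct ib_pd *pd, u64 pa, u64 len)
{
        struct ib_phys_buf buf = { .addr = pa, .size = len };
        u64 iova = pa;

        return ib_reg_phys_mr(pd, &buf, 1,
                              IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
                              &iova);
}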