[linux-2.6-block.git] drivers/infiniband/hw/cxgb4/qp.c
1 /*
2  * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <linux/module.h>
34 #include <rdma/uverbs_ioctl.h>
35
36 #include "iw_cxgb4.h"
37
38 static int db_delay_usecs = 1;
39 module_param(db_delay_usecs, int, 0644);
40 MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");
41
42 static int ocqp_support = 1;
43 module_param(ocqp_support, int, 0644);
44 MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
45
46 int db_fc_threshold = 1000;
47 module_param(db_fc_threshold, int, 0644);
48 MODULE_PARM_DESC(db_fc_threshold,
49                  "QP count/threshold that triggers"
50                  " automatic db flow control mode (default = 1000)");
51
52 int db_coalescing_threshold;
53 module_param(db_coalescing_threshold, int, 0644);
54 MODULE_PARM_DESC(db_coalescing_threshold,
55                  "QP count/threshold that triggers"
56                  " disabling db coalescing (default = 0)");
57
58 static int max_fr_immd = T4_MAX_FR_IMMD;
59 module_param(max_fr_immd, int, 0644);
60 MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
61
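/*
 * Reserve @ird incoming-read (IRD) credits from the device-wide pool
 * tracked in dev->avail_ird, under the qps xarray lock.  Returns
 * -ENOMEM (and warns) if the pool cannot cover the request; free_ird()
 * returns the credits.
 */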
62 static int alloc_ird(struct c4iw_dev *dev, u32 ird)
63 {
64         int ret = 0;
65
66         xa_lock_irq(&dev->qps);
67         if (ird <= dev->avail_ird)
68                 dev->avail_ird -= ird;
69         else
70                 ret = -ENOMEM;
71         xa_unlock_irq(&dev->qps);
72
73         if (ret)
74                 dev_warn(&dev->rdev.lldi.pdev->dev,
75                          "device IRD resources exhausted\n");
76
77         return ret;
78 }
79
80 static void free_ird(struct c4iw_dev *dev, int ird)
81 {
82         xa_lock_irq(&dev->qps);
83         dev->avail_ird += ird;
84         xa_unlock_irq(&dev->qps);
85 }
86
87 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
88 {
89         unsigned long flag;
90         spin_lock_irqsave(&qhp->lock, flag);
91         qhp->attr.state = state;
92         spin_unlock_irqrestore(&qhp->lock, flag);
93 }
94
95 static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
96 {
97         c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
98 }
99
100 static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
101 {
102         dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
103                           dma_unmap_addr(sq, mapping));
104 }
105
106 static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
107 {
108         if (t4_sq_onchip(sq))
109                 dealloc_oc_sq(rdev, sq);
110         else
111                 dealloc_host_sq(rdev, sq);
112 }
113
114 static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
115 {
116         if (!ocqp_support || !ocqp_supported(&rdev->lldi))
117                 return -ENOSYS;
118         sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
119         if (!sq->dma_addr)
120                 return -ENOMEM;
121         sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
122                         rdev->lldi.vr->ocq.start;
123         sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
124                                             rdev->lldi.vr->ocq.start);
125         sq->flags |= T4_SQ_ONCHIP;
126         return 0;
127 }
128
129 static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
130 {
131         sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
132                                        &(sq->dma_addr), GFP_KERNEL);
133         if (!sq->queue)
134                 return -ENOMEM;
135         sq->phys_addr = virt_to_phys(sq->queue);
136         dma_unmap_addr_set(sq, mapping, sq->dma_addr);
137         return 0;
138 }
139
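/*
 * For user QPs, try to place the SQ in on-chip queue memory first and
 * fall back to host DMA memory if that fails or is unsupported.
 * Kernel QPs always use host memory.
 */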
140 static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
141 {
142         int ret = -ENOSYS;
143         if (user)
144                 ret = alloc_oc_sq(rdev, sq);
145         if (ret)
146                 ret = alloc_host_sq(rdev, sq);
147         return ret;
148 }
149
150 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
151                       struct c4iw_dev_ucontext *uctx, int has_rq)
152 {
153         /*
154          * uP clears EQ contexts when the connection exits rdma mode,
155          * so no need to post a RESET WR for these EQs.
156          */
157         dealloc_sq(rdev, &wq->sq);
158         kfree(wq->sq.sw_sq);
159         c4iw_put_qpid(rdev, wq->sq.qid, uctx);
160
161         if (has_rq) {
162                 dma_free_coherent(&rdev->lldi.pdev->dev,
163                                   wq->rq.memsize, wq->rq.queue,
164                                   dma_unmap_addr(&wq->rq, mapping));
165                 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
166                 kfree(wq->rq.sw_rq);
167                 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
168         }
169         return 0;
170 }
171
172 /*
173  * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL,
174  * then this is a user mapping so compute the page-aligned physical address
175  * for mapping.
176  */
177 void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
178                               enum cxgb4_bar2_qtype qtype,
179                               unsigned int *pbar2_qid, u64 *pbar2_pa)
180 {
181         u64 bar2_qoffset;
182         int ret;
183
184         ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype,
185                                    pbar2_pa ? 1 : 0,
186                                    &bar2_qoffset, pbar2_qid);
187         if (ret)
188                 return NULL;
189
190         if (pbar2_pa)
191                 *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
192
193         if (is_t4(rdev->lldi.adapter_type))
194                 return NULL;
195
196         return rdev->bar2_kva + bar2_qoffset;
197 }
198
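/*
 * Allocate everything one QP needs: SQ/RQ qids, software shadow queues
 * for kernel QPs, the RQT entry, the queue memory itself and the BAR2
 * doorbell addresses.  Finally post a single FW_RI_RES_WR describing
 * the SQ (and, if need_rq, the RQ) and wait for the firmware to
 * complete it.  All resources are unwound on any failure.
 */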
199 static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
200                      struct t4_cq *rcq, struct t4_cq *scq,
201                      struct c4iw_dev_ucontext *uctx,
202                      struct c4iw_wr_wait *wr_waitp,
203                      int need_rq)
204 {
205         int user = (uctx != &rdev->uctx);
206         struct fw_ri_res_wr *res_wr;
207         struct fw_ri_res *res;
208         int wr_len;
209         struct sk_buff *skb;
210         int ret = 0;
211         int eqsize;
212
213         wq->sq.qid = c4iw_get_qpid(rdev, uctx);
214         if (!wq->sq.qid)
215                 return -ENOMEM;
216
217         if (need_rq) {
218                 wq->rq.qid = c4iw_get_qpid(rdev, uctx);
219                 if (!wq->rq.qid) {
220                         ret = -ENOMEM;
221                         goto free_sq_qid;
222                 }
223         }
224
225         if (!user) {
226                 wq->sq.sw_sq = kcalloc(wq->sq.size, sizeof(*wq->sq.sw_sq),
227                                        GFP_KERNEL);
228                 if (!wq->sq.sw_sq) {
229                         ret = -ENOMEM;
230                         goto free_rq_qid; /* FIXME */
231                 }
232
233                 if (need_rq) {
234                         wq->rq.sw_rq = kcalloc(wq->rq.size,
235                                                sizeof(*wq->rq.sw_rq),
236                                                GFP_KERNEL);
237                         if (!wq->rq.sw_rq) {
238                                 ret = -ENOMEM;
239                                 goto free_sw_sq;
240                         }
241                 }
242         }
243
244         if (need_rq) {
245                 /*
246                  * RQT must be a power of 2 and at least 16 deep.
247                  */
248                 wq->rq.rqt_size =
249                         roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
250                 wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
251                 if (!wq->rq.rqt_hwaddr) {
252                         ret = -ENOMEM;
253                         goto free_sw_rq;
254                 }
255         }
256
257         ret = alloc_sq(rdev, &wq->sq, user);
258         if (ret)
259                 goto free_hwaddr;
260         memset(wq->sq.queue, 0, wq->sq.memsize);
261         dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
262
263         if (need_rq) {
264                 wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
265                                                   wq->rq.memsize,
266                                                   &wq->rq.dma_addr,
267                                                   GFP_KERNEL);
268                 if (!wq->rq.queue) {
269                         ret = -ENOMEM;
270                         goto free_sq;
271                 }
272                 pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
273                          wq->sq.queue,
274                          (unsigned long long)virt_to_phys(wq->sq.queue),
275                          wq->rq.queue,
276                          (unsigned long long)virt_to_phys(wq->rq.queue));
277                 dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
278         }
279
280         wq->db = rdev->lldi.db_reg;
281
282         wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid,
283                                          CXGB4_BAR2_QTYPE_EGRESS,
284                                          &wq->sq.bar2_qid,
285                                          user ? &wq->sq.bar2_pa : NULL);
286         if (need_rq)
287                 wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
288                                                  CXGB4_BAR2_QTYPE_EGRESS,
289                                                  &wq->rq.bar2_qid,
290                                                  user ? &wq->rq.bar2_pa : NULL);
291
292         /*
293          * User mode must have bar2 access.
294          */
295         if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
296                 pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
297                         pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
                    ret = -EINVAL;
298                 goto free_dma;
299         }
300
301         wq->rdev = rdev;
302         wq->rq.msn = 1;
303
304         /* build fw_ri_res_wr */
305         wr_len = sizeof(*res_wr) + 2 * sizeof(*res);
306         if (need_rq)
307                 wr_len += sizeof(*res);
308         skb = alloc_skb(wr_len, GFP_KERNEL);
309         if (!skb) {
310                 ret = -ENOMEM;
311                 goto free_dma;
312         }
313         set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
314
315         res_wr = __skb_put_zero(skb, wr_len);
316         res_wr->op_nres = cpu_to_be32(
317                         FW_WR_OP_V(FW_RI_RES_WR) |
318                         FW_RI_RES_WR_NRES_V(need_rq ? 2 : 1) |
319                         FW_WR_COMPL_F);
320         res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
321         res_wr->cookie = (uintptr_t)wr_waitp;
322         res = res_wr->res;
323         res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
324         res->u.sqrq.op = FW_RI_RES_OP_WRITE;
325
326         /*
327          * eqsize is the number of 64B entries plus the status page size.
328          */
329         eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
330                 rdev->hw_queue.t4_eq_status_entries;
331
332         res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
333                 FW_RI_RES_WR_HOSTFCMODE_V(0) |  /* no host cidx updates */
334                 FW_RI_RES_WR_CPRIO_V(0) |       /* don't keep in chip cache */
335                 FW_RI_RES_WR_PCIECHN_V(0) |     /* set by uP at ri_init time */
336                 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
337                 FW_RI_RES_WR_IQID_V(scq->cqid));
338         res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
339                 FW_RI_RES_WR_DCAEN_V(0) |
340                 FW_RI_RES_WR_DCACPU_V(0) |
341                 FW_RI_RES_WR_FBMIN_V(2) |
342                 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
343                                          FW_RI_RES_WR_FBMAX_V(3)) |
344                 FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
345                 FW_RI_RES_WR_CIDXFTHRESH_V(0) |
346                 FW_RI_RES_WR_EQSIZE_V(eqsize));
347         res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
348         res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
349
350         if (need_rq) {
351                 res++;
352                 res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
353                 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
354
355                 /*
356                  * eqsize is the number of 64B entries plus the status page size
357                  */
358                 eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
359                         rdev->hw_queue.t4_eq_status_entries;
360                 res->u.sqrq.fetchszm_to_iqid =
361                         /* no host cidx updates */
362                         cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
363                         /* don't keep in chip cache */
364                         FW_RI_RES_WR_CPRIO_V(0) |
365                         /* set by uP at ri_init time */
366                         FW_RI_RES_WR_PCIECHN_V(0) |
367                         FW_RI_RES_WR_IQID_V(rcq->cqid));
368                 res->u.sqrq.dcaen_to_eqsize =
369                         cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
370                         FW_RI_RES_WR_DCACPU_V(0) |
371                         FW_RI_RES_WR_FBMIN_V(2) |
372                         FW_RI_RES_WR_FBMAX_V(3) |
373                         FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
374                         FW_RI_RES_WR_CIDXFTHRESH_V(0) |
375                         FW_RI_RES_WR_EQSIZE_V(eqsize));
376                 res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
377                 res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
378         }
379
380         c4iw_init_wr_wait(wr_waitp);
381         ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
382         if (ret)
383                 goto free_dma;
384
385         pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
386                  wq->sq.qid, wq->rq.qid, wq->db,
387                  wq->sq.bar2_va, wq->rq.bar2_va);
388
389         return 0;
390 free_dma:
391         if (need_rq)
392                 dma_free_coherent(&rdev->lldi.pdev->dev,
393                                   wq->rq.memsize, wq->rq.queue,
394                                   dma_unmap_addr(&wq->rq, mapping));
395 free_sq:
396         dealloc_sq(rdev, &wq->sq);
397 free_hwaddr:
398         if (need_rq)
399                 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
400 free_sw_rq:
401         if (need_rq)
402                 kfree(wq->rq.sw_rq);
403 free_sw_sq:
404         kfree(wq->sq.sw_sq);
405 free_rq_qid:
406         if (need_rq)
407                 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
408 free_sq_qid:
409         c4iw_put_qpid(rdev, wq->sq.qid, uctx);
410         return ret;
411 }
412
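/*
 * Copy the send SGEs inline into the WQE right after @immdp, wrapping
 * back to the start of the SQ if the copy runs past the end of the
 * queue memory.  Fails with -EMSGSIZE if the total payload exceeds
 * @max; the copied length is returned through @plenp.
 */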
413 static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
414                       const struct ib_send_wr *wr, int max, u32 *plenp)
415 {
416         u8 *dstp, *srcp;
417         u32 plen = 0;
418         int i;
419         int rem, len;
420
421         dstp = (u8 *)immdp->data;
422         for (i = 0; i < wr->num_sge; i++) {
423                 if ((plen + wr->sg_list[i].length) > max)
424                         return -EMSGSIZE;
425                 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
426                 plen += wr->sg_list[i].length;
427                 rem = wr->sg_list[i].length;
428                 while (rem) {
429                         if (dstp == (u8 *)&sq->queue[sq->size])
430                                 dstp = (u8 *)sq->queue;
431                         if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
432                                 len = rem;
433                         else
434                                 len = (u8 *)&sq->queue[sq->size] - dstp;
435                         memcpy(dstp, srcp, len);
436                         dstp += len;
437                         srcp += len;
438                         rem -= len;
439                 }
440         }
441         len = roundup(plen + sizeof(*immdp), 16) - (plen + sizeof(*immdp));
442         if (len)
443                 memset(dstp, 0, len);
444         immdp->op = FW_RI_DATA_IMMD;
445         immdp->r1 = 0;
446         immdp->r2 = 0;
447         immdp->immdlen = cpu_to_be32(plen);
448         *plenp = plen;
449         return 0;
450 }
451
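/*
 * Build an immediate SGL (fw_ri_isgl) from @sg_list directly in the
 * queue memory, wrapping from @queue_end back to @queue_start as
 * needed.  A 32-bit overflow of the total length is rejected with
 * -EMSGSIZE; the total is returned through @plenp when non-NULL.
 */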
452 static int build_isgl(__be64 *queue_start, __be64 *queue_end,
453                       struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
454                       int num_sge, u32 *plenp)
456 {
457         int i;
458         u32 plen = 0;
459         __be64 *flitp;
460
461         if ((__be64 *)isglp == queue_end)
462                 isglp = (struct fw_ri_isgl *)queue_start;
463
464         flitp = (__be64 *)isglp->sge;
465
466         for (i = 0; i < num_sge; i++) {
467                 if ((plen + sg_list[i].length) < plen)
468                         return -EMSGSIZE;
469                 plen += sg_list[i].length;
470                 *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
471                                      sg_list[i].length);
472                 if (++flitp == queue_end)
473                         flitp = queue_start;
474                 *flitp = cpu_to_be64(sg_list[i].addr);
475                 if (++flitp == queue_end)
476                         flitp = queue_start;
477         }
478         *flitp = (__force __be64)0;
479         isglp->op = FW_RI_DATA_ISGL;
480         isglp->r1 = 0;
481         isglp->nsge = cpu_to_be16(num_sge);
482         isglp->r2 = 0;
483         if (plenp)
484                 *plenp = plen;
485         return 0;
486 }
487
488 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
489                            const struct ib_send_wr *wr, u8 *len16)
490 {
491         u32 plen;
492         int size;
493         int ret;
494
495         if (wr->num_sge > T4_MAX_SEND_SGE)
496                 return -EINVAL;
497         switch (wr->opcode) {
498         case IB_WR_SEND:
499                 if (wr->send_flags & IB_SEND_SOLICITED)
500                         wqe->send.sendop_pkd = cpu_to_be32(
501                                 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
502                 else
503                         wqe->send.sendop_pkd = cpu_to_be32(
504                                 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
505                 wqe->send.stag_inv = 0;
506                 break;
507         case IB_WR_SEND_WITH_INV:
508                 if (wr->send_flags & IB_SEND_SOLICITED)
509                         wqe->send.sendop_pkd = cpu_to_be32(
510                                 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
511                 else
512                         wqe->send.sendop_pkd = cpu_to_be32(
513                                 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
514                 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
515                 break;
516
517         default:
518                 return -EINVAL;
519         }
520         wqe->send.r3 = 0;
521         wqe->send.r4 = 0;
522
523         plen = 0;
524         if (wr->num_sge) {
525                 if (wr->send_flags & IB_SEND_INLINE) {
526                         ret = build_immd(sq, wqe->send.u.immd_src, wr,
527                                          T4_MAX_SEND_INLINE, &plen);
528                         if (ret)
529                                 return ret;
530                         size = sizeof(wqe->send) + sizeof(struct fw_ri_immd) +
531                                plen;
532                 } else {
533                         ret = build_isgl((__be64 *)sq->queue,
534                                          (__be64 *)&sq->queue[sq->size],
535                                          wqe->send.u.isgl_src,
536                                          wr->sg_list, wr->num_sge, &plen);
537                         if (ret)
538                                 return ret;
539                         size = sizeof(wqe->send) + sizeof(struct fw_ri_isgl) +
540                                wr->num_sge * sizeof(struct fw_ri_sge);
541                 }
542         } else {
543                 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
544                 wqe->send.u.immd_src[0].r1 = 0;
545                 wqe->send.u.immd_src[0].r2 = 0;
546                 wqe->send.u.immd_src[0].immdlen = 0;
547                 size = sizeof(wqe->send) + sizeof(struct fw_ri_immd);
548                 plen = 0;
549         }
550         *len16 = DIV_ROUND_UP(size, 16);
551         wqe->send.plen = cpu_to_be32(plen);
552         return 0;
553 }
554
555 static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
556                             const struct ib_send_wr *wr, u8 *len16)
557 {
558         u32 plen;
559         int size;
560         int ret;
561
562         if (wr->num_sge > T4_MAX_SEND_SGE)
563                 return -EINVAL;
564
565         /*
566          * The iWARP protocol supports 64-bit immediate data, but the RDMA
567          * API limits it to 32 bits.
568          */
569         if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
570                 wqe->write.iw_imm_data.ib_imm_data.imm_data32 = wr->ex.imm_data;
571         else
572                 wqe->write.iw_imm_data.ib_imm_data.imm_data32 = 0;
573         wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
574         wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
575         if (wr->num_sge) {
576                 if (wr->send_flags & IB_SEND_INLINE) {
577                         ret = build_immd(sq, wqe->write.u.immd_src, wr,
578                                          T4_MAX_WRITE_INLINE, &plen);
579                         if (ret)
580                                 return ret;
581                         size = sizeof(wqe->write) + sizeof(struct fw_ri_immd) +
582                                plen;
583                 } else {
584                         ret = build_isgl((__be64 *)sq->queue,
585                                          (__be64 *)&sq->queue[sq->size],
586                                          wqe->write.u.isgl_src,
587                                          wr->sg_list, wr->num_sge, &plen);
588                         if (ret)
589                                 return ret;
590                         size = sizeof(wqe->write) + sizeof(struct fw_ri_isgl) +
591                                wr->num_sge * sizeof(struct fw_ri_sge);
592                 }
593         } else {
594                 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
595                 wqe->write.u.immd_src[0].r1 = 0;
596                 wqe->write.u.immd_src[0].r2 = 0;
597                 wqe->write.u.immd_src[0].immdlen = 0;
598                 size = sizeof(wqe->write) + sizeof(struct fw_ri_immd);
599                 plen = 0;
600         }
601         *len16 = DIV_ROUND_UP(size, 16);
602         wqe->write.plen = cpu_to_be32(plen);
603         return 0;
604 }
605
606 static void build_immd_cmpl(struct t4_sq *sq, struct fw_ri_immd_cmpl *immdp,
607                             struct ib_send_wr *wr)
608 {
609         memcpy((u8 *)immdp->data, (u8 *)(uintptr_t)wr->sg_list->addr, 16);
610         memset(immdp->r1, 0, 6);
611         immdp->op = FW_RI_DATA_IMMD;
612         immdp->immdlen = 16;
613 }
614
615 static void build_rdma_write_cmpl(struct t4_sq *sq,
616                                   struct fw_ri_rdma_write_cmpl_wr *wcwr,
617                                   const struct ib_send_wr *wr, u8 *len16)
618 {
619         u32 plen;
620         int size;
621
622         /*
623          * This code assumes the struct fields preceding the write isgl
624          * fit in one 64B WR slot.  This is because the WQE is built
625          * directly in the dma queue, and wrapping is only handled
626          * by the code building sgls.  I.e., the "fixed part" of the wr
627          * structs must all fit in 64B.  The WQE build code should probably be
628          * redesigned to avoid this restriction, but for now just add
629          * the BUILD_BUG_ON() to catch if this WQE struct gets too big.
630          */
631         BUILD_BUG_ON(offsetof(struct fw_ri_rdma_write_cmpl_wr, u) > 64);
632
633         wcwr->stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
634         wcwr->to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
635         if (wr->next->opcode == IB_WR_SEND)
636                 wcwr->stag_inv = 0;
637         else
638                 wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);
639         wcwr->r2 = 0;
640         wcwr->r3 = 0;
641
642         /* SEND_INV SGL */
643         if (wr->next->send_flags & IB_SEND_INLINE)
644                 build_immd_cmpl(sq, &wcwr->u_cmpl.immd_src, wr->next);
645         else
646                 build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
647                            &wcwr->u_cmpl.isgl_src, wr->next->sg_list, 1, NULL);
648
649         /* WRITE SGL */
650         build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
651                    wcwr->u.isgl_src, wr->sg_list, wr->num_sge, &plen);
652
653         size = sizeof(*wcwr) + sizeof(struct fw_ri_isgl) +
654                 wr->num_sge * sizeof(struct fw_ri_sge);
655         wcwr->plen = cpu_to_be32(plen);
656         *len16 = DIV_ROUND_UP(size, 16);
657 }
658
659 static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr,
660                            u8 *len16)
661 {
662         if (wr->num_sge > 1)
663                 return -EINVAL;
664         if (wr->num_sge && wr->sg_list[0].length) {
665                 wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
666                 wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
667                                                         >> 32));
668                 wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
669                 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
670                 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
671                 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
672                                                          >> 32));
673                 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
674         } else {
675                 wqe->read.stag_src = cpu_to_be32(2);
676                 wqe->read.to_src_hi = 0;
677                 wqe->read.to_src_lo = 0;
678                 wqe->read.stag_sink = cpu_to_be32(2);
679                 wqe->read.plen = 0;
680                 wqe->read.to_sink_hi = 0;
681                 wqe->read.to_sink_lo = 0;
682         }
683         wqe->read.r2 = 0;
684         wqe->read.r5 = 0;
685         *len16 = DIV_ROUND_UP(sizeof(wqe->read), 16);
686         return 0;
687 }
688
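/*
 * Post a WRITE + SEND[_WITH_INV] pair as one FW_RI_RDMA_WRITE_CMPL_WR.
 * Two sw_sq entries are still consumed, carrying the two wr_ids and
 * signaled flags, but only a single WR is placed on the hardware SQ
 * and the doorbell is rung once.
 */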
689 static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
690 {
691         bool send_signaled = (wr->next->send_flags & IB_SEND_SIGNALED) ||
692                              qhp->sq_sig_all;
693         bool write_signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
694                               qhp->sq_sig_all;
695         struct t4_swsqe *swsqe;
696         union t4_wr *wqe;
697         u16 write_wrid;
698         u8 len16;
699         u16 idx;
700
701         /*
702          * The sw_sq entries still look like a WRITE and a SEND and consume
703          * 2 slots. The FW WR, however, will be a single uber-WR.
704          */
705         wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
706                qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
707         build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16);
708
709         /* WRITE swsqe */
710         swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
711         swsqe->opcode = FW_RI_RDMA_WRITE;
712         swsqe->idx = qhp->wq.sq.pidx;
713         swsqe->complete = 0;
714         swsqe->signaled = write_signaled;
715         swsqe->flushed = 0;
716         swsqe->wr_id = wr->wr_id;
717         if (c4iw_wr_log) {
718                 swsqe->sge_ts =
719                         cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
720                 swsqe->host_time = ktime_get();
721         }
722
723         write_wrid = qhp->wq.sq.pidx;
724
725         /* just bump the sw_sq */
726         qhp->wq.sq.in_use++;
727         if (++qhp->wq.sq.pidx == qhp->wq.sq.size)
728                 qhp->wq.sq.pidx = 0;
729
730         /* SEND_WITH_INV swsqe */
731         swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
732         if (wr->next->opcode == IB_WR_SEND)
733                 swsqe->opcode = FW_RI_SEND;
734         else
735                 swsqe->opcode = FW_RI_SEND_WITH_INV;
736         swsqe->idx = qhp->wq.sq.pidx;
737         swsqe->complete = 0;
738         swsqe->signaled = send_signaled;
739         swsqe->flushed = 0;
740         swsqe->wr_id = wr->next->wr_id;
741         if (c4iw_wr_log) {
742                 swsqe->sge_ts =
743                         cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
744                 swsqe->host_time = ktime_get();
745         }
746
747         wqe->write_cmpl.flags_send = send_signaled ? FW_RI_COMPLETION_FLAG : 0;
748         wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx;
749
750         init_wr_hdr(wqe, write_wrid, FW_RI_RDMA_WRITE_CMPL_WR,
751                     write_signaled ? FW_RI_COMPLETION_FLAG : 0, len16);
752         t4_sq_produce(&qhp->wq, len16);
753         idx = DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
754
755         t4_ring_sq_db(&qhp->wq, idx, wqe);
756 }
757
758 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
759                            const struct ib_recv_wr *wr, u8 *len16)
760 {
761         int ret;
762
763         ret = build_isgl((__be64 *)qhp->wq.rq.queue,
764                          (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
765                          &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
766         if (ret)
767                 return ret;
768         *len16 = DIV_ROUND_UP(
769                 sizeof(wqe->recv) + wr->num_sge * sizeof(struct fw_ri_sge), 16);
770         return 0;
771 }
772
773 static int build_srq_recv(union t4_recv_wr *wqe, const struct ib_recv_wr *wr,
774                           u8 *len16)
775 {
776         int ret;
777
778         ret = build_isgl((__be64 *)wqe, (__be64 *)(wqe + 1),
779                          &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
780         if (ret)
781                 return ret;
782         *len16 = DIV_ROUND_UP(sizeof(wqe->recv) +
783                               wr->num_sge * sizeof(struct fw_ri_sge), 16);
784         return 0;
785 }
786
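/*
 * Build a FW_RI_FR_NSMR_TPTE_WR fast-register WR that carries the
 * fully formed TPT entry plus up to two PBL entries inline.  Only used
 * when mhp->mpl_len <= 2 and the adapter advertises
 * fr_nsmr_tpte_wr_support (see c4iw_post_send()).
 */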
787 static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
788                               const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
789                               u8 *len16)
790 {
791         __be64 *p = (__be64 *)fr->pbl;
792
793         fr->r2 = cpu_to_be32(0);
794         fr->stag = cpu_to_be32(mhp->ibmr.rkey);
795
796         fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
797                 FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) |
798                 FW_RI_TPTE_STAGSTATE_V(1) |
799                 FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR) |
800                 FW_RI_TPTE_PDID_V(mhp->attr.pdid));
801         fr->tpte.locread_to_qpid = cpu_to_be32(
802                 FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) |
803                 FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO) |
804                 FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12));
805         fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V(
806                 PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3));
807         fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
808         fr->tpte.len_hi = cpu_to_be32(0);
809         fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length);
810         fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
811         fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
812
813         p[0] = cpu_to_be64((u64)mhp->mpl[0]);
814         p[1] = cpu_to_be64((u64)mhp->mpl[1]);
815
816         *len16 = DIV_ROUND_UP(sizeof(*fr), 16);
817 }
818
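/*
 * Build a FW_RI_FR_NSMR_WR fast-register WR.  The PBL is attached as a
 * DSGL when the adapter supports it and the PBL is larger than
 * max_fr_immd; otherwise it is copied into the WQE as immediate data,
 * handling SQ wrap and zero-padding the 32-byte-rounded remainder.
 */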
819 static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
820                         const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
821                         u8 *len16, bool dsgl_supported)
822 {
823         struct fw_ri_immd *imdp;
824         __be64 *p;
825         int i;
826         int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
827         int rem;
828
829         if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl))
830                 return -EINVAL;
831
832         wqe->fr.qpbinde_to_dcacpu = 0;
833         wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
834         wqe->fr.addr_type = FW_RI_VA_BASED_TO;
835         wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
836         wqe->fr.len_hi = 0;
837         wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length);
838         wqe->fr.stag = cpu_to_be32(wr->key);
839         wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
840         wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
841                                         0xffffffff);
842
843         if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
844                 struct fw_ri_dsgl *sglp;
845
846                 for (i = 0; i < mhp->mpl_len; i++)
847                         mhp->mpl[i] = (__force u64)cpu_to_be64((u64)mhp->mpl[i]);
848
849                 sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
850                 sglp->op = FW_RI_DATA_DSGL;
851                 sglp->r1 = 0;
852                 sglp->nsge = cpu_to_be16(1);
853                 sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
854                 sglp->len0 = cpu_to_be32(pbllen);
855
856                 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
857         } else {
858                 imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
859                 imdp->op = FW_RI_DATA_IMMD;
860                 imdp->r1 = 0;
861                 imdp->r2 = 0;
862                 imdp->immdlen = cpu_to_be32(pbllen);
863                 p = (__be64 *)(imdp + 1);
864                 rem = pbllen;
865                 for (i = 0; i < mhp->mpl_len; i++) {
866                         *p = cpu_to_be64((u64)mhp->mpl[i]);
867                         rem -= sizeof(*p);
868                         if (++p == (__be64 *)&sq->queue[sq->size])
869                                 p = (__be64 *)sq->queue;
870                 }
871                 while (rem) {
872                         *p = 0;
873                         rem -= sizeof(*p);
874                         if (++p == (__be64 *)&sq->queue[sq->size])
875                                 p = (__be64 *)sq->queue;
876                 }
877                 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
878                                       + pbllen, 16);
879         }
880         return 0;
881 }
882
883 static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr,
884                           u8 *len16)
885 {
886         wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
887         wqe->inv.r2 = 0;
888         *len16 = DIV_ROUND_UP(sizeof(wqe->inv), 16);
889         return 0;
890 }
891
892 void c4iw_qp_add_ref(struct ib_qp *qp)
893 {
894         pr_debug("ib_qp %p\n", qp);
895         refcount_inc(&to_c4iw_qp(qp)->qp_refcnt);
896 }
897
898 void c4iw_qp_rem_ref(struct ib_qp *qp)
899 {
900         pr_debug("ib_qp %p\n", qp);
901         if (refcount_dec_and_test(&to_c4iw_qp(qp)->qp_refcnt))
902                 complete(&to_c4iw_qp(qp)->qp_rel_comp);
903 }
904
905 static void add_to_fc_list(struct list_head *head, struct list_head *entry)
906 {
907         if (list_empty(entry))
908                 list_add_tail(entry, head);
909 }
910
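/*
 * Ring the SQ doorbell for a kernel QP.  If the device doorbell state
 * is not NORMAL, the pidx increment is accumulated in wq_pidx_inc and
 * the QP is queued on db_fc_list so the doorbell can be rung later by
 * the flow-control code.
 */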
911 static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
912 {
913         unsigned long flags;
914
915         xa_lock_irqsave(&qhp->rhp->qps, flags);
916         spin_lock(&qhp->lock);
917         if (qhp->rhp->db_state == NORMAL)
918                 t4_ring_sq_db(&qhp->wq, inc, NULL);
919         else {
920                 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
921                 qhp->wq.sq.wq_pidx_inc += inc;
922         }
923         spin_unlock(&qhp->lock);
924         xa_unlock_irqrestore(&qhp->rhp->qps, flags);
925         return 0;
926 }
927
928 static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
929 {
930         unsigned long flags;
931
932         xa_lock_irqsave(&qhp->rhp->qps, flags);
933         spin_lock(&qhp->lock);
934         if (qhp->rhp->db_state == NORMAL)
935                 t4_ring_rq_db(&qhp->wq, inc, NULL);
936         else {
937                 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
938                 qhp->wq.rq.wq_pidx_inc += inc;
939         }
940         spin_unlock(&qhp->lock);
941         xa_unlock_irqrestore(&qhp->rhp->qps, flags);
942         return 0;
943 }
944
945 static int ib_to_fw_opcode(int ib_opcode)
946 {
947         int opcode;
948
949         switch (ib_opcode) {
950         case IB_WR_SEND_WITH_INV:
951                 opcode = FW_RI_SEND_WITH_INV;
952                 break;
953         case IB_WR_SEND:
954                 opcode = FW_RI_SEND;
955                 break;
956         case IB_WR_RDMA_WRITE:
957                 opcode = FW_RI_RDMA_WRITE;
958                 break;
959         case IB_WR_RDMA_WRITE_WITH_IMM:
960                 opcode = FW_RI_WRITE_IMMEDIATE;
961                 break;
962         case IB_WR_RDMA_READ:
963         case IB_WR_RDMA_READ_WITH_INV:
964                 opcode = FW_RI_READ_REQ;
965                 break;
966         case IB_WR_REG_MR:
967                 opcode = FW_RI_FAST_REGISTER;
968                 break;
969         case IB_WR_LOCAL_INV:
970                 opcode = FW_RI_LOCAL_INV;
971                 break;
972         default:
973                 opcode = -EINVAL;
974         }
975         return opcode;
976 }
977
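/*
 * The QP has been flushed, so complete this SQ work request entirely
 * in software: synthesize a T4_ERR_SWFLUSH drain CQE, add it to the
 * send CQ's software queue and, if the CQ is armed, invoke its
 * completion handler.
 */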
978 static int complete_sq_drain_wr(struct c4iw_qp *qhp,
979                                 const struct ib_send_wr *wr)
980 {
981         struct t4_cqe cqe = {};
982         struct c4iw_cq *schp;
983         unsigned long flag;
984         struct t4_cq *cq;
985         int opcode;
986
987         schp = to_c4iw_cq(qhp->ibqp.send_cq);
988         cq = &schp->cq;
989
990         opcode = ib_to_fw_opcode(wr->opcode);
991         if (opcode < 0)
992                 return opcode;
993
994         cqe.u.drain_cookie = wr->wr_id;
995         cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
996                                  CQE_OPCODE_V(opcode) |
997                                  CQE_TYPE_V(1) |
998                                  CQE_SWCQE_V(1) |
999                                  CQE_DRAIN_V(1) |
1000                                  CQE_QPID_V(qhp->wq.sq.qid));
1001
1002         spin_lock_irqsave(&schp->lock, flag);
1003         cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
1004         cq->sw_queue[cq->sw_pidx] = cqe;
1005         t4_swcq_produce(cq);
1006         spin_unlock_irqrestore(&schp->lock, flag);
1007
1008         if (t4_clear_cq_armed(&schp->cq)) {
1009                 spin_lock_irqsave(&schp->comp_handler_lock, flag);
1010                 (*schp->ibcq.comp_handler)(&schp->ibcq,
1011                                            schp->ibcq.cq_context);
1012                 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1013         }
1014         return 0;
1015 }
1016
1017 static int complete_sq_drain_wrs(struct c4iw_qp *qhp,
1018                                  const struct ib_send_wr *wr,
1019                                  const struct ib_send_wr **bad_wr)
1020 {
1021         int ret = 0;
1022
1023         while (wr) {
1024                 ret = complete_sq_drain_wr(qhp, wr);
1025                 if (ret) {
1026                         *bad_wr = wr;
1027                         break;
1028                 }
1029                 wr = wr->next;
1030         }
1031         return ret;
1032 }
1033
1034 static void complete_rq_drain_wr(struct c4iw_qp *qhp,
1035                                  const struct ib_recv_wr *wr)
1036 {
1037         struct t4_cqe cqe = {};
1038         struct c4iw_cq *rchp;
1039         unsigned long flag;
1040         struct t4_cq *cq;
1041
1042         rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
1043         cq = &rchp->cq;
1044
1045         cqe.u.drain_cookie = wr->wr_id;
1046         cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
1047                                  CQE_OPCODE_V(FW_RI_SEND) |
1048                                  CQE_TYPE_V(0) |
1049                                  CQE_SWCQE_V(1) |
1050                                  CQE_DRAIN_V(1) |
1051                                  CQE_QPID_V(qhp->wq.sq.qid));
1052
1053         spin_lock_irqsave(&rchp->lock, flag);
1054         cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
1055         cq->sw_queue[cq->sw_pidx] = cqe;
1056         t4_swcq_produce(cq);
1057         spin_unlock_irqrestore(&rchp->lock, flag);
1058
1059         if (t4_clear_cq_armed(&rchp->cq)) {
1060                 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1061                 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1062                                            rchp->ibcq.cq_context);
1063                 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1064         }
1065 }
1066
1067 static void complete_rq_drain_wrs(struct c4iw_qp *qhp,
1068                                   const struct ib_recv_wr *wr)
1069 {
1070         while (wr) {
1071                 complete_rq_drain_wr(qhp, wr);
1072                 wr = wr->next;
1073         }
1074 }
1075
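/*
 * Post a chain of send work requests.  Flushed QPs get software drain
 * CQEs instead.  On T5+ adapters with write_cmpl_support, a qualifying
 * WRITE->SEND[_WITH_INV] pair is collapsed into one write_cmpl WR;
 * otherwise each WR is built into the SQ and the doorbell is rung
 * directly or deferred via ring_kernel_sq_db() when doorbells are off.
 */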
1076 int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1077                    const struct ib_send_wr **bad_wr)
1078 {
1079         int err = 0;
1080         u8 len16 = 0;
1081         enum fw_wr_opcodes fw_opcode = 0;
1082         enum fw_ri_wr_flags fw_flags;
1083         struct c4iw_qp *qhp;
1084         struct c4iw_dev *rhp;
1085         union t4_wr *wqe = NULL;
1086         u32 num_wrs;
1087         struct t4_swsqe *swsqe;
1088         unsigned long flag;
1089         u16 idx = 0;
1090
1091         qhp = to_c4iw_qp(ibqp);
1092         rhp = qhp->rhp;
1093         spin_lock_irqsave(&qhp->lock, flag);
1094
1095         /*
1096          * If the qp has been flushed, then just insert a special
1097          * drain cqe.
1098          */
1099         if (qhp->wq.flushed) {
1100                 spin_unlock_irqrestore(&qhp->lock, flag);
1101                 err = complete_sq_drain_wrs(qhp, wr, bad_wr);
1102                 return err;
1103         }
1104         num_wrs = t4_sq_avail(&qhp->wq);
1105         if (num_wrs == 0) {
1106                 spin_unlock_irqrestore(&qhp->lock, flag);
1107                 *bad_wr = wr;
1108                 return -ENOMEM;
1109         }
1110
1111         /*
1112          * Fastpath for the NVMe-oF target WRITE + SEND_WITH_INV wr chain that is
1113          * the response to small NVMe-oF READ requests.  If the chain is
1114          * exactly a WRITE->SEND_WITH_INV or a WRITE->SEND and the sgl depths
1115          * and lengths meet the requirements of the fw_ri_write_cmpl_wr work
1116          * request, then build and post the write_cmpl WR.  If any of the tests
1117          * below are not true, then we continue on with the traditional WRITE
1118          * and SEND WRs.
1119          */
1120         if (qhp->rhp->rdev.lldi.write_cmpl_support &&
1121             CHELSIO_CHIP_VERSION(qhp->rhp->rdev.lldi.adapter_type) >=
1122             CHELSIO_T5 &&
1123             wr && wr->next && !wr->next->next &&
1124             wr->opcode == IB_WR_RDMA_WRITE &&
1125             wr->sg_list[0].length && wr->num_sge <= T4_WRITE_CMPL_MAX_SGL &&
1126             (wr->next->opcode == IB_WR_SEND ||
1127             wr->next->opcode == IB_WR_SEND_WITH_INV) &&
1128             wr->next->sg_list[0].length == T4_WRITE_CMPL_MAX_CQE &&
1129             wr->next->num_sge == 1 && num_wrs >= 2) {
1130                 post_write_cmpl(qhp, wr);
1131                 spin_unlock_irqrestore(&qhp->lock, flag);
1132                 return 0;
1133         }
1134
1135         while (wr) {
1136                 if (num_wrs == 0) {
1137                         err = -ENOMEM;
1138                         *bad_wr = wr;
1139                         break;
1140                 }
1141                 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
1142                       qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
1143
1144                 fw_flags = 0;
1145                 if (wr->send_flags & IB_SEND_SOLICITED)
1146                         fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
1147                 if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
1148                         fw_flags |= FW_RI_COMPLETION_FLAG;
1149                 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
1150                 switch (wr->opcode) {
1151                 case IB_WR_SEND_WITH_INV:
1152                 case IB_WR_SEND:
1153                         if (wr->send_flags & IB_SEND_FENCE)
1154                                 fw_flags |= FW_RI_READ_FENCE_FLAG;
1155                         fw_opcode = FW_RI_SEND_WR;
1156                         if (wr->opcode == IB_WR_SEND)
1157                                 swsqe->opcode = FW_RI_SEND;
1158                         else
1159                                 swsqe->opcode = FW_RI_SEND_WITH_INV;
1160                         err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
1161                         break;
1162                 case IB_WR_RDMA_WRITE_WITH_IMM:
1163                         if (unlikely(!rhp->rdev.lldi.write_w_imm_support)) {
1164                                 err = -EINVAL;
1165                                 break;
1166                         }
1167                         fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE;
1168                         fallthrough;
1169                 case IB_WR_RDMA_WRITE:
1170                         fw_opcode = FW_RI_RDMA_WRITE_WR;
1171                         swsqe->opcode = FW_RI_RDMA_WRITE;
1172                         err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
1173                         break;
1174                 case IB_WR_RDMA_READ:
1175                 case IB_WR_RDMA_READ_WITH_INV:
1176                         fw_opcode = FW_RI_RDMA_READ_WR;
1177                         swsqe->opcode = FW_RI_READ_REQ;
1178                         if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
1179                                 c4iw_invalidate_mr(rhp, wr->sg_list[0].lkey);
1180                                 fw_flags = FW_RI_RDMA_READ_INVALIDATE;
1181                         } else {
1182                                 fw_flags = 0;
1183                         }
1184                         err = build_rdma_read(wqe, wr, &len16);
1185                         if (err)
1186                                 break;
1187                         swsqe->read_len = wr->sg_list[0].length;
1188                         if (!qhp->wq.sq.oldest_read)
1189                                 qhp->wq.sq.oldest_read = swsqe;
1190                         break;
1191                 case IB_WR_REG_MR: {
1192                         struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);
1193
1194                         swsqe->opcode = FW_RI_FAST_REGISTER;
1195                         if (rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
1196                             !mhp->attr.state && mhp->mpl_len <= 2) {
1197                                 fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
1198                                 build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
1199                                                   mhp, &len16);
1200                         } else {
1201                                 fw_opcode = FW_RI_FR_NSMR_WR;
1202                                 err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
1203                                        mhp, &len16,
1204                                        rhp->rdev.lldi.ulptx_memwrite_dsgl);
1205                                 if (err)
1206                                         break;
1207                         }
1208                         mhp->attr.state = 1;
1209                         break;
1210                 }
1211                 case IB_WR_LOCAL_INV:
1212                         if (wr->send_flags & IB_SEND_FENCE)
1213                                 fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
1214                         fw_opcode = FW_RI_INV_LSTAG_WR;
1215                         swsqe->opcode = FW_RI_LOCAL_INV;
1216                         err = build_inv_stag(wqe, wr, &len16);
1217                         c4iw_invalidate_mr(rhp, wr->ex.invalidate_rkey);
1218                         break;
1219                 default:
1220                         pr_warn("%s post of type=%d TBD!\n", __func__,
1221                                 wr->opcode);
1222                         err = -EINVAL;
1223                 }
1224                 if (err) {
1225                         *bad_wr = wr;
1226                         break;
1227                 }
1228                 swsqe->idx = qhp->wq.sq.pidx;
1229                 swsqe->complete = 0;
1230                 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
1231                                   qhp->sq_sig_all;
1232                 swsqe->flushed = 0;
1233                 swsqe->wr_id = wr->wr_id;
1234                 if (c4iw_wr_log) {
1235                         swsqe->sge_ts = cxgb4_read_sge_timestamp(
1236                                         rhp->rdev.lldi.ports[0]);
1237                         swsqe->host_time = ktime_get();
1238                 }
1239
1240                 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
1241
1242                 pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
1243                          (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
1244                          swsqe->opcode, swsqe->read_len);
1245                 wr = wr->next;
1246                 num_wrs--;
1247                 t4_sq_produce(&qhp->wq, len16);
1248                 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
1249         }
1250         if (!rhp->rdev.status_page->db_off) {
1251                 t4_ring_sq_db(&qhp->wq, idx, wqe);
1252                 spin_unlock_irqrestore(&qhp->lock, flag);
1253         } else {
1254                 spin_unlock_irqrestore(&qhp->lock, flag);
1255                 ring_kernel_sq_db(qhp, idx);
1256         }
1257         return err;
1258 }
1259
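/*
 * Post a chain of receive work requests to the RQ.  Mirrors
 * c4iw_post_send(): flushed QPs get software drain CQEs, and the
 * doorbell is rung directly or deferred via ring_kernel_rq_db().
 */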
1260 int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
1261                       const struct ib_recv_wr **bad_wr)
1262 {
1263         int err = 0;
1264         struct c4iw_qp *qhp;
1265         union t4_recv_wr *wqe = NULL;
1266         u32 num_wrs;
1267         u8 len16 = 0;
1268         unsigned long flag;
1269         u16 idx = 0;
1270
1271         qhp = to_c4iw_qp(ibqp);
1272         spin_lock_irqsave(&qhp->lock, flag);
1273
1274         /*
1275          * If the qp has been flushed, then just insert a special
1276          * drain cqe.
1277          */
1278         if (qhp->wq.flushed) {
1279                 spin_unlock_irqrestore(&qhp->lock, flag);
1280                 complete_rq_drain_wrs(qhp, wr);
1281                 return err;
1282         }
1283         num_wrs = t4_rq_avail(&qhp->wq);
1284         if (num_wrs == 0) {
1285                 spin_unlock_irqrestore(&qhp->lock, flag);
1286                 *bad_wr = wr;
1287                 return -ENOMEM;
1288         }
1289         while (wr) {
1290                 if (wr->num_sge > T4_MAX_RECV_SGE) {
1291                         err = -EINVAL;
1292                         *bad_wr = wr;
1293                         break;
1294                 }
1295                 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
1296                                            qhp->wq.rq.wq_pidx *
1297                                            T4_EQ_ENTRY_SIZE);
1298                 if (num_wrs)
1299                         err = build_rdma_recv(qhp, wqe, wr, &len16);
1300                 else
1301                         err = -ENOMEM;
1302                 if (err) {
1303                         *bad_wr = wr;
1304                         break;
1305                 }
1306
1307                 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
1308                 if (c4iw_wr_log) {
1309                         qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
1310                                 cxgb4_read_sge_timestamp(
1311                                                 qhp->rhp->rdev.lldi.ports[0]);
1312                         qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time =
1313                                 ktime_get();
1314                 }
1315
1316                 wqe->recv.opcode = FW_RI_RECV_WR;
1317                 wqe->recv.r1 = 0;
1318                 wqe->recv.wrid = qhp->wq.rq.pidx;
1319                 wqe->recv.r2[0] = 0;
1320                 wqe->recv.r2[1] = 0;
1321                 wqe->recv.r2[2] = 0;
1322                 wqe->recv.len16 = len16;
1323                 pr_debug("cookie 0x%llx pidx %u\n",
1324                          (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
1325                 t4_rq_produce(&qhp->wq, len16);
1326                 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
1327                 wr = wr->next;
1328                 num_wrs--;
1329         }
1330         if (!qhp->rhp->rdev.status_page->db_off) {
1331                 t4_ring_rq_db(&qhp->wq, idx, wqe);
1332                 spin_unlock_irqrestore(&qhp->lock, flag);
1333         } else {
1334                 spin_unlock_irqrestore(&qhp->lock, flag);
1335                 ring_kernel_rq_db(qhp, idx);
1336         }
1337         return err;
1338 }
1339
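/*
 * Stash an SRQ recv WR in the pending-WR ring instead of posting it,
 * preserving ordering while earlier slots are still outstanding
 * (ooo_count/pending_in_use) or the target sw_rq slot is still valid.
 */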
1340 static void defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe,
1341                          u64 wr_id, u8 len16)
1342 {
1343         struct t4_srq_pending_wr *pwr = &srq->pending_wrs[srq->pending_pidx];
1344
1345         pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u ooo_count %u wr_id 0x%llx pending_cidx %u pending_pidx %u pending_in_use %u\n",
1346                  __func__, srq->cidx, srq->pidx, srq->wq_pidx,
1347                  srq->in_use, srq->ooo_count,
1348                  (unsigned long long)wr_id, srq->pending_cidx,
1349                  srq->pending_pidx, srq->pending_in_use);
1350         pwr->wr_id = wr_id;
1351         pwr->len16 = len16;
1352         memcpy(&pwr->wqe, wqe, len16 * 16);
1353         t4_srq_produce_pending_wr(srq);
1354 }
1355
1356 int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
1357                        const struct ib_recv_wr **bad_wr)
1358 {
1359         union t4_recv_wr *wqe, lwqe;
1360         struct c4iw_srq *srq;
1361         unsigned long flag;
1362         u8 len16 = 0;
1363         u16 idx = 0;
1364         int err = 0;
1365         u32 num_wrs;
1366
1367         srq = to_c4iw_srq(ibsrq);
1368         spin_lock_irqsave(&srq->lock, flag);
1369         num_wrs = t4_srq_avail(&srq->wq);
1370         if (num_wrs == 0) {
1371                 spin_unlock_irqrestore(&srq->lock, flag);
1372                 return -ENOMEM;
1373         }
1374         while (wr) {
1375                 if (wr->num_sge > T4_MAX_RECV_SGE) {
1376                         err = -EINVAL;
1377                         *bad_wr = wr;
1378                         break;
1379                 }
1380                 wqe = &lwqe;
1381                 if (num_wrs)
1382                         err = build_srq_recv(wqe, wr, &len16);
1383                 else
1384                         err = -ENOMEM;
1385                 if (err) {
1386                         *bad_wr = wr;
1387                         break;
1388                 }
1389
1390                 wqe->recv.opcode = FW_RI_RECV_WR;
1391                 wqe->recv.r1 = 0;
1392                 wqe->recv.wrid = srq->wq.pidx;
1393                 wqe->recv.r2[0] = 0;
1394                 wqe->recv.r2[1] = 0;
1395                 wqe->recv.r2[2] = 0;
1396                 wqe->recv.len16 = len16;
1397
1398                 if (srq->wq.ooo_count ||
1399                     srq->wq.pending_in_use ||
1400                     srq->wq.sw_rq[srq->wq.pidx].valid) {
1401                         defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16);
1402                 } else {
1403                         srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id;
1404                         srq->wq.sw_rq[srq->wq.pidx].valid = 1;
1405                         c4iw_copy_wr_to_srq(&srq->wq, wqe, len16);
1406                         pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u wr_id 0x%llx\n",
1407                                  __func__, srq->wq.cidx,
1408                                  srq->wq.pidx, srq->wq.wq_pidx,
1409                                  srq->wq.in_use,
1410                                  (unsigned long long)wr->wr_id);
1411                         t4_srq_produce(&srq->wq, len16);
1412                         idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
1413                 }
1414                 wr = wr->next;
1415                 num_wrs--;
1416         }
1417         if (idx)
1418                 t4_ring_srq_db(&srq->wq, idx, len16, wqe);
1419         spin_unlock_irqrestore(&srq->lock, flag);
1420         return err;
1421 }
1422
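/*
 * Map a T4 error CQE onto the TERMINATE layer/etype and error code carried
 * in the terminate message (RDMAP/DDP/MPA error classes).
 */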
1423 static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
1424                                     u8 *ecode)
1425 {
1426         int status;
1427         int tagged;
1428         int opcode;
1429         int rqtype;
1430         int send_inv;
1431
1432         if (!err_cqe) {
1433                 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
1434                 *ecode = 0;
1435                 return;
1436         }
1437
1438         status = CQE_STATUS(err_cqe);
1439         opcode = CQE_OPCODE(err_cqe);
1440         rqtype = RQ_TYPE(err_cqe);
1441         send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
1442                    (opcode == FW_RI_SEND_WITH_SE_INV);
1443         tagged = (opcode == FW_RI_RDMA_WRITE) ||
1444                  (rqtype && (opcode == FW_RI_READ_RESP));
1445
1446         switch (status) {
1447         case T4_ERR_STAG:
1448                 if (send_inv) {
1449                         *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1450                         *ecode = RDMAP_CANT_INV_STAG;
1451                 } else {
1452                         *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1453                         *ecode = RDMAP_INV_STAG;
1454                 }
1455                 break;
1456         case T4_ERR_PDID:
1457                 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1458                 if ((opcode == FW_RI_SEND_WITH_INV) ||
1459                     (opcode == FW_RI_SEND_WITH_SE_INV))
1460                         *ecode = RDMAP_CANT_INV_STAG;
1461                 else
1462                         *ecode = RDMAP_STAG_NOT_ASSOC;
1463                 break;
1464         case T4_ERR_QPID:
1465                 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1466                 *ecode = RDMAP_STAG_NOT_ASSOC;
1467                 break;
1468         case T4_ERR_ACCESS:
1469                 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1470                 *ecode = RDMAP_ACC_VIOL;
1471                 break;
1472         case T4_ERR_WRAP:
1473                 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1474                 *ecode = RDMAP_TO_WRAP;
1475                 break;
1476         case T4_ERR_BOUND:
1477                 if (tagged) {
1478                         *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1479                         *ecode = DDPT_BASE_BOUNDS;
1480                 } else {
1481                         *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1482                         *ecode = RDMAP_BASE_BOUNDS;
1483                 }
1484                 break;
1485         case T4_ERR_INVALIDATE_SHARED_MR:
1486         case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
1487                 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1488                 *ecode = RDMAP_CANT_INV_STAG;
1489                 break;
1490         case T4_ERR_ECC:
1491         case T4_ERR_ECC_PSTAG:
1492         case T4_ERR_INTERNAL_ERR:
1493                 *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
1494                 *ecode = 0;
1495                 break;
1496         case T4_ERR_OUT_OF_RQE:
1497                 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1498                 *ecode = DDPU_INV_MSN_NOBUF;
1499                 break;
1500         case T4_ERR_PBL_ADDR_BOUND:
1501                 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1502                 *ecode = DDPT_BASE_BOUNDS;
1503                 break;
1504         case T4_ERR_CRC:
1505                 *layer_type = LAYER_MPA|DDP_LLP;
1506                 *ecode = MPA_CRC_ERR;
1507                 break;
1508         case T4_ERR_MARKER:
1509                 *layer_type = LAYER_MPA|DDP_LLP;
1510                 *ecode = MPA_MARKER_ERR;
1511                 break;
1512         case T4_ERR_PDU_LEN_ERR:
1513                 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1514                 *ecode = DDPU_MSG_TOOBIG;
1515                 break;
1516         case T4_ERR_DDP_VERSION:
1517                 if (tagged) {
1518                         *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1519                         *ecode = DDPT_INV_VERS;
1520                 } else {
1521                         *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1522                         *ecode = DDPU_INV_VERS;
1523                 }
1524                 break;
1525         case T4_ERR_RDMA_VERSION:
1526                 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1527                 *ecode = RDMAP_INV_VERS;
1528                 break;
1529         case T4_ERR_OPCODE:
1530                 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1531                 *ecode = RDMAP_INV_OPCODE;
1532                 break;
1533         case T4_ERR_DDP_QUEUE_NUM:
1534                 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1535                 *ecode = DDPU_INV_QN;
1536                 break;
1537         case T4_ERR_MSN:
1538         case T4_ERR_MSN_GAP:
1539         case T4_ERR_MSN_RANGE:
1540         case T4_ERR_IRD_OVERFLOW:
1541                 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1542                 *ecode = DDPU_INV_MSN_RANGE;
1543                 break;
1544         case T4_ERR_TBIT:
1545                 *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
1546                 *ecode = 0;
1547                 break;
1548         case T4_ERR_MO:
1549                 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1550                 *ecode = DDPU_INV_MO;
1551                 break;
1552         default:
1553                 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
1554                 *ecode = 0;
1555                 break;
1556         }
1557 }
1558
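/*
 * Post a FW_RI_INIT_WR of type TERMINATE so the firmware sends a TERMINATE
 * message to the peer.  The skb comes from the ep's pre-allocated skb list,
 * avoiding an allocation in this path.
 */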
1559 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
1560                            gfp_t gfp)
1561 {
1562         struct fw_ri_wr *wqe;
1563         struct sk_buff *skb;
1564         struct terminate_message *term;
1565
1566         pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
1567                  qhp->ep->hwtid);
1568
1569         skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
1570         if (WARN_ON(!skb))
1571                 return;
1572
1573         set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1574
1575         wqe = __skb_put_zero(skb, sizeof(*wqe));
1576         wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
1577         wqe->flowid_len16 = cpu_to_be32(
1578                 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1579                 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1580
1581         wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
1582         wqe->u.terminate.immdlen = cpu_to_be32(sizeof(*term));
1583         term = (struct terminate_message *)wqe->u.terminate.termmsg;
1584         if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
1585                 term->layer_etype = qhp->attr.layer_etype;
1586                 term->ecode = qhp->attr.ecode;
1587         } else
1588                 build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
1589         c4iw_ofld_send(&qhp->rhp->rdev, skb);
1590 }
1591
1592 /*
1593  * Caller must hold qhp->mutex; the CQ and QP spinlocks are taken here.
1594  */
1595 static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
1596                        struct c4iw_cq *schp)
1597 {
1598         int count;
1599         int rq_flushed = 0, sq_flushed;
1600         unsigned long flag;
1601
1602         pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
1603
1604         /* locking hierarchy: cqs lock first, then qp lock. */
1605         spin_lock_irqsave(&rchp->lock, flag);
1606         if (schp != rchp)
1607                 spin_lock(&schp->lock);
1608         spin_lock(&qhp->lock);
1609
1610         if (qhp->wq.flushed) {
1611                 spin_unlock(&qhp->lock);
1612                 if (schp != rchp)
1613                         spin_unlock(&schp->lock);
1614                 spin_unlock_irqrestore(&rchp->lock, flag);
1615                 return;
1616         }
1617         qhp->wq.flushed = 1;
1618         t4_set_wq_in_error(&qhp->wq, 0);
1619
1620         c4iw_flush_hw_cq(rchp, qhp);
1621         if (!qhp->srq) {
1622                 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
1623                 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
1624         }
1625
1626         if (schp != rchp)
1627                 c4iw_flush_hw_cq(schp, qhp);
1628         sq_flushed = c4iw_flush_sq(qhp);
1629
1630         spin_unlock(&qhp->lock);
1631         if (schp != rchp)
1632                 spin_unlock(&schp->lock);
1633         spin_unlock_irqrestore(&rchp->lock, flag);
1634
1635         if (schp == rchp) {
1636                 if ((rq_flushed || sq_flushed) &&
1637                     t4_clear_cq_armed(&rchp->cq)) {
1638                         spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1639                         (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1640                                                    rchp->ibcq.cq_context);
1641                         spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1642                 }
1643         } else {
1644                 if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
1645                         spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1646                         (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1647                                                    rchp->ibcq.cq_context);
1648                         spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1649                 }
1650                 if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
1651                         spin_lock_irqsave(&schp->comp_handler_lock, flag);
1652                         (*schp->ibcq.comp_handler)(&schp->ibcq,
1653                                                    schp->ibcq.cq_context);
1654                         spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1655                 }
1656         }
1657 }
1658
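/*
 * For user QPs there is nothing to flush in the kernel: mark the WQ and CQs
 * in error and run the completion handlers so userspace flushes the queues
 * itself.  Kernel QPs are flushed here via __flush_qp().
 */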
1659 static void flush_qp(struct c4iw_qp *qhp)
1660 {
1661         struct c4iw_cq *rchp, *schp;
1662         unsigned long flag;
1663
1664         rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
1665         schp = to_c4iw_cq(qhp->ibqp.send_cq);
1666
1667         if (qhp->ibqp.uobject) {
1668
1669                 /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
1670                 if (qhp->wq.flushed)
1671                         return;
1672
1673                 qhp->wq.flushed = 1;
1674                 t4_set_wq_in_error(&qhp->wq, 0);
1675                 t4_set_cq_in_error(&rchp->cq);
1676                 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1677                 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
1678                 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1679                 if (schp != rchp) {
1680                         t4_set_cq_in_error(&schp->cq);
1681                         spin_lock_irqsave(&schp->comp_handler_lock, flag);
1682                         (*schp->ibcq.comp_handler)(&schp->ibcq,
1683                                         schp->ibcq.cq_context);
1684                         spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1685                 }
1686                 return;
1687         }
1688         __flush_qp(qhp, rchp, schp);
1689 }
1690
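/*
 * Send a FW_RI_TYPE_FINI work request for a graceful close and block until
 * the firmware acknowledges it (or the wait aborts).
 */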
1691 static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1692                      struct c4iw_ep *ep)
1693 {
1694         struct fw_ri_wr *wqe;
1695         int ret;
1696         struct sk_buff *skb;
1697
1698         pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
1699
1700         skb = skb_dequeue(&ep->com.ep_skb_list);
1701         if (WARN_ON(!skb))
1702                 return -ENOMEM;
1703
1704         set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1705
1706         wqe = __skb_put_zero(skb, sizeof(*wqe));
1707         wqe->op_compl = cpu_to_be32(
1708                 FW_WR_OP_V(FW_RI_INIT_WR) |
1709                 FW_WR_COMPL_F);
1710         wqe->flowid_len16 = cpu_to_be32(
1711                 FW_WR_FLOWID_V(ep->hwtid) |
1712                 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1713         wqe->cookie = (uintptr_t)ep->com.wr_waitp;
1714
1715         wqe->u.fini.type = FW_RI_TYPE_FINI;
1716
1717         ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp,
1718                                  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
1719
1720         pr_debug("ret %d\n", ret);
1721         return ret;
1722 }
1723
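/*
 * Build the zero-length RTR message (a 0B read or write) used to complete
 * MPA peer-to-peer connection setup when this side is the initiator.
 */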
1724 static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
1725 {
1726         pr_debug("p2p_type = %d\n", p2p_type);
1727         memset(&init->u, 0, sizeof(init->u));
1728         switch (p2p_type) {
1729         case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
1730                 init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
1731                 init->u.write.stag_sink = cpu_to_be32(1);
1732                 init->u.write.to_sink = cpu_to_be64(1);
1733                 init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
1734                 init->u.write.len16 = DIV_ROUND_UP(
1735                         sizeof(init->u.write) + sizeof(struct fw_ri_immd), 16);
1736                 break;
1737         case FW_RI_INIT_P2PTYPE_READ_REQ:
1738                 init->u.read.opcode = FW_RI_RDMA_READ_WR;
1739                 init->u.read.stag_src = cpu_to_be32(1);
1740                 init->u.read.to_src_lo = cpu_to_be32(1);
1741                 init->u.read.stag_sink = cpu_to_be32(1);
1742                 init->u.read.to_sink_lo = cpu_to_be32(1);
1743                 init->u.read.len16 = DIV_ROUND_UP(sizeof(init->u.read), 16);
1744                 break;
1745         }
1746 }
1747
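/*
 * Send the FW_RI_TYPE_INIT work request that transitions the QP into RDMA
 * mode: MPA attributes, QP capabilities, queue IDs and the initial
 * send/receive sequence numbers all come from the connection setup state.
 */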
1748 static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1749 {
1750         struct fw_ri_wr *wqe;
1751         int ret;
1752         struct sk_buff *skb;
1753
1754         pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
1755                  qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
1756
1757         skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
1758         if (!skb) {
1759                 ret = -ENOMEM;
1760                 goto out;
1761         }
1762         ret = alloc_ird(rhp, qhp->attr.max_ird);
1763         if (ret) {
1764                 qhp->attr.max_ird = 0;
1765                 kfree_skb(skb);
1766                 goto out;
1767         }
1768         set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1769
1770         wqe = __skb_put_zero(skb, sizeof(*wqe));
1771         wqe->op_compl = cpu_to_be32(
1772                 FW_WR_OP_V(FW_RI_INIT_WR) |
1773                 FW_WR_COMPL_F);
1774         wqe->flowid_len16 = cpu_to_be32(
1775                 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1776                 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1777
1778         wqe->cookie = (uintptr_t)qhp->ep->com.wr_waitp;
1779
1780         wqe->u.init.type = FW_RI_TYPE_INIT;
1781         wqe->u.init.mpareqbit_p2ptype =
1782                 FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
1783                 FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
1784         wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
1785         if (qhp->attr.mpa_attr.recv_marker_enabled)
1786                 wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
1787         if (qhp->attr.mpa_attr.xmit_marker_enabled)
1788                 wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
1789         if (qhp->attr.mpa_attr.crc_enabled)
1790                 wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
1791
1792         wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
1793                             FW_RI_QP_RDMA_WRITE_ENABLE |
1794                             FW_RI_QP_BIND_ENABLE;
1795         if (!qhp->ibqp.uobject)
1796                 wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
1797                                      FW_RI_QP_STAG0_ENABLE;
1798         wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1799         wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1800         wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1801         wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1802         if (qhp->srq) {
1803                 wqe->u.init.rq_eqid = cpu_to_be32(FW_RI_INIT_RQEQID_SRQ |
1804                                                   qhp->srq->idx);
1805         } else {
1806                 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1807                 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1808                 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1809                                                    rhp->rdev.lldi.vr->rq.start);
1810         }
1811         wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1812         wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1813         wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1814         wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1815         wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
1816         wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
1817         if (qhp->attr.mpa_attr.initiator)
1818                 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1819
1820         ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp,
1821                                  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
1822         if (!ret)
1823                 goto out;
1824
1825         free_ird(rhp, qhp->attr.max_ird);
1826 out:
1827         pr_debug("ret %d\n", ret);
1828         return ret;
1829 }
1830
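/*
 * Core QP state machine.  Runs under qhp->mutex; any terminate, disconnect
 * or final EP dereference triggered by the transition is deferred until the
 * mutex is dropped.
 */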
1831 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1832                    enum c4iw_qp_attr_mask mask,
1833                    struct c4iw_qp_attributes *attrs,
1834                    int internal)
1835 {
1836         int ret = 0;
1837         struct c4iw_qp_attributes newattr = qhp->attr;
1838         int disconnect = 0;
1839         int terminate = 0;
1840         int abort = 0;
1841         int free = 0;
1842         struct c4iw_ep *ep = NULL;
1843
1844         pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
1845                  qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
1846                  (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
1847
1848         mutex_lock(&qhp->mutex);
1849
1850         /* Process attr changes if in IDLE */
1851         if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
1852                 if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1853                         ret = -EIO;
1854                         goto out;
1855                 }
1856                 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
1857                         newattr.enable_rdma_read = attrs->enable_rdma_read;
1858                 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
1859                         newattr.enable_rdma_write = attrs->enable_rdma_write;
1860                 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
1861                         newattr.enable_bind = attrs->enable_bind;
1862                 if (mask & C4IW_QP_ATTR_MAX_ORD) {
1863                         if (attrs->max_ord > c4iw_max_read_depth) {
1864                                 ret = -EINVAL;
1865                                 goto out;
1866                         }
1867                         newattr.max_ord = attrs->max_ord;
1868                 }
1869                 if (mask & C4IW_QP_ATTR_MAX_IRD) {
1870                         if (attrs->max_ird > cur_max_read_depth(rhp)) {
1871                                 ret = -EINVAL;
1872                                 goto out;
1873                         }
1874                         newattr.max_ird = attrs->max_ird;
1875                 }
1876                 qhp->attr = newattr;
1877         }
1878
1879         if (mask & C4IW_QP_ATTR_SQ_DB) {
1880                 ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
1881                 goto out;
1882         }
1883         if (mask & C4IW_QP_ATTR_RQ_DB) {
1884                 ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
1885                 goto out;
1886         }
1887
1888         if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
1889                 goto out;
1890         if (qhp->attr.state == attrs->next_state)
1891                 goto out;
1892
1893         switch (qhp->attr.state) {
1894         case C4IW_QP_STATE_IDLE:
1895                 switch (attrs->next_state) {
1896                 case C4IW_QP_STATE_RTS:
1897                         if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
1898                                 ret = -EINVAL;
1899                                 goto out;
1900                         }
1901                         if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
1902                                 ret = -EINVAL;
1903                                 goto out;
1904                         }
1905                         qhp->attr.mpa_attr = attrs->mpa_attr;
1906                         qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1907                         qhp->ep = qhp->attr.llp_stream_handle;
1908                         set_state(qhp, C4IW_QP_STATE_RTS);
1909
1910                         /*
1911                          * Ref the endpoint here and deref when we
1912                          * disassociate the endpoint from the QP.  This
1913                          * happens in CLOSING->IDLE transition or *->ERROR
1914                          * transition.
1915                          */
1916                         c4iw_get_ep(&qhp->ep->com);
1917                         ret = rdma_init(rhp, qhp);
1918                         if (ret)
1919                                 goto err;
1920                         break;
1921                 case C4IW_QP_STATE_ERROR:
1922                         set_state(qhp, C4IW_QP_STATE_ERROR);
1923                         flush_qp(qhp);
1924                         break;
1925                 default:
1926                         ret = -EINVAL;
1927                         goto out;
1928                 }
1929                 break;
1930         case C4IW_QP_STATE_RTS:
1931                 switch (attrs->next_state) {
1932                 case C4IW_QP_STATE_CLOSING:
1933                         t4_set_wq_in_error(&qhp->wq, 0);
1934                         set_state(qhp, C4IW_QP_STATE_CLOSING);
1935                         ep = qhp->ep;
1936                         if (!internal) {
1937                                 abort = 0;
1938                                 disconnect = 1;
1939                                 c4iw_get_ep(&qhp->ep->com);
1940                         }
1941                         ret = rdma_fini(rhp, qhp, ep);
1942                         if (ret)
1943                                 goto err;
1944                         break;
1945                 case C4IW_QP_STATE_TERMINATE:
1946                         t4_set_wq_in_error(&qhp->wq, 0);
1947                         set_state(qhp, C4IW_QP_STATE_TERMINATE);
1948                         qhp->attr.layer_etype = attrs->layer_etype;
1949                         qhp->attr.ecode = attrs->ecode;
1950                         ep = qhp->ep;
1951                         if (!internal) {
1952                                 c4iw_get_ep(&ep->com);
1953                                 terminate = 1;
1954                                 disconnect = 1;
1955                         } else {
1956                                 terminate = qhp->attr.send_term;
1957                                 ret = rdma_fini(rhp, qhp, ep);
1958                                 if (ret)
1959                                         goto err;
1960                         }
1961                         break;
1962                 case C4IW_QP_STATE_ERROR:
1963                         t4_set_wq_in_error(&qhp->wq, 0);
1964                         set_state(qhp, C4IW_QP_STATE_ERROR);
1965                         if (!internal) {
1966                                 abort = 1;
1967                                 disconnect = 1;
1968                                 ep = qhp->ep;
1969                                 c4iw_get_ep(&qhp->ep->com);
1970                         }
1971                         goto err;
1972                         break;
1973                 default:
1974                         ret = -EINVAL;
1975                         goto out;
1976                 }
1977                 break;
1978         case C4IW_QP_STATE_CLOSING:
1979
1980                 /*
1981                  * Allow kernel users to move to ERROR for qp draining.
1982                  */
1983                 if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
1984                                   C4IW_QP_STATE_ERROR)) {
1985                         ret = -EINVAL;
1986                         goto out;
1987                 }
1988                 switch (attrs->next_state) {
1989                 case C4IW_QP_STATE_IDLE:
1990                         flush_qp(qhp);
1991                         set_state(qhp, C4IW_QP_STATE_IDLE);
1992                         qhp->attr.llp_stream_handle = NULL;
1993                         c4iw_put_ep(&qhp->ep->com);
1994                         qhp->ep = NULL;
1995                         wake_up(&qhp->wait);
1996                         break;
1997                 case C4IW_QP_STATE_ERROR:
1998                         goto err;
1999                 default:
2000                         ret = -EINVAL;
2001                         goto err;
2002                 }
2003                 break;
2004         case C4IW_QP_STATE_ERROR:
2005                 if (attrs->next_state != C4IW_QP_STATE_IDLE) {
2006                         ret = -EINVAL;
2007                         goto out;
2008                 }
2009                 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
2010                         ret = -EINVAL;
2011                         goto out;
2012                 }
2013                 set_state(qhp, C4IW_QP_STATE_IDLE);
2014                 break;
2015         case C4IW_QP_STATE_TERMINATE:
2016                 if (!internal) {
2017                         ret = -EINVAL;
2018                         goto out;
2019                 }
2020                 goto err;
2021                 break;
2022         default:
2023                 pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
2024                 ret = -EINVAL;
2025                 goto err;
2026                 break;
2027         }
2028         goto out;
2029 err:
2030         pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep,
2031                  qhp->wq.sq.qid);
2032
2033         /* disassociate the LLP connection */
2034         qhp->attr.llp_stream_handle = NULL;
2035         if (!ep)
2036                 ep = qhp->ep;
2037         qhp->ep = NULL;
2038         set_state(qhp, C4IW_QP_STATE_ERROR);
2039         free = 1;
2040         abort = 1;
2041         flush_qp(qhp);
2042         wake_up(&qhp->wait);
2043 out:
2044         mutex_unlock(&qhp->mutex);
2045
2046         if (terminate)
2047                 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
2048
2049         /*
2050          * If disconnect is 1, then we need to initiate a disconnect
2051          * on the EP.  This can be a normal close (RTS->CLOSING) or
2052          * an abnormal close (RTS/CLOSING->ERROR).
2053          */
2054         if (disconnect) {
2055                 c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
2056                                                          GFP_KERNEL);
2057                 c4iw_put_ep(&ep->com);
2058         }
2059
2060         /*
2061          * If free is 1, then we've disassociated the EP from the QP
2062          * and we need to dereference the EP.
2063          */
2064         if (free)
2065                 c4iw_put_ep(&ep->com);
2066         pr_debug("exit state %d\n", qhp->attr.state);
2067         return ret;
2068 }
2069
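/*
 * Destroy path: force the QP to ERROR, wait for the EP to be disassociated,
 * unhook the QP from the device tables, then drop the reference and wait
 * for the last user before freeing the hardware queues.
 */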
2070 int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
2071 {
2072         struct c4iw_dev *rhp;
2073         struct c4iw_qp *qhp;
2074         struct c4iw_ucontext *ucontext;
2075         struct c4iw_qp_attributes attrs;
2076
2077         qhp = to_c4iw_qp(ib_qp);
2078         rhp = qhp->rhp;
2079         ucontext = qhp->ucontext;
2080
2081         attrs.next_state = C4IW_QP_STATE_ERROR;
2082         if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
2083                 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2084         else
2085                 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
2086         wait_event(qhp->wait, !qhp->ep);
2087
2088         xa_lock_irq(&rhp->qps);
2089         __xa_erase(&rhp->qps, qhp->wq.sq.qid);
2090         if (!list_empty(&qhp->db_fc_entry))
2091                 list_del_init(&qhp->db_fc_entry);
2092         xa_unlock_irq(&rhp->qps);
2093         free_ird(rhp, qhp->attr.max_ird);
2094
2095         c4iw_qp_rem_ref(ib_qp);
2096
2097         wait_for_completion(&qhp->qp_rel_comp);
2098
2099         pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
2100         pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
2101
2102         destroy_qp(&rhp->rdev, &qhp->wq,
2103                    ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
2104
2105         c4iw_put_wr_wait(qhp->wr_waitp);
2106
2107         kfree(qhp);
2108         return 0;
2109 }
2110
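/*
 * Create a kernel or user RC QP: size and allocate the SQ (and the RQ when
 * no SRQ is used), register it with the device, and for userspace export
 * the queue memory and doorbell pages via mmap keys in the create response.
 */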
2111 struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
2112                              struct ib_udata *udata)
2113 {
2114         struct c4iw_dev *rhp;
2115         struct c4iw_qp *qhp;
2116         struct c4iw_pd *php;
2117         struct c4iw_cq *schp;
2118         struct c4iw_cq *rchp;
2119         struct c4iw_create_qp_resp uresp;
2120         unsigned int sqsize, rqsize = 0;
2121         struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
2122                 udata, struct c4iw_ucontext, ibucontext);
2123         int ret;
2124         struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
2125         struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
2126
2127         pr_debug("ib_pd %p\n", pd);
2128
2129         if (attrs->qp_type != IB_QPT_RC)
2130                 return ERR_PTR(-EOPNOTSUPP);
2131
2132         php = to_c4iw_pd(pd);
2133         rhp = php->rhp;
2134         schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
2135         rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
2136         if (!schp || !rchp)
2137                 return ERR_PTR(-EINVAL);
2138
2139         if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
2140                 return ERR_PTR(-EINVAL);
2141
2142         if (!attrs->srq) {
2143                 if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
2144                         return ERR_PTR(-E2BIG);
2145                 rqsize = attrs->cap.max_recv_wr + 1;
2146                 if (rqsize < 8)
2147                         rqsize = 8;
2148         }
2149
2150         if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
2151                 return ERR_PTR(-E2BIG);
2152         sqsize = attrs->cap.max_send_wr + 1;
2153         if (sqsize < 8)
2154                 sqsize = 8;
2155
2156         qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
2157         if (!qhp)
2158                 return ERR_PTR(-ENOMEM);
2159
2160         qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
2161         if (!qhp->wr_waitp) {
2162                 ret = -ENOMEM;
2163                 goto err_free_qhp;
2164         }
2165
2166         qhp->wq.sq.size = sqsize;
2167         qhp->wq.sq.memsize =
2168                 (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
2169                 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
2170         qhp->wq.sq.flush_cidx = -1;
2171         if (!attrs->srq) {
2172                 qhp->wq.rq.size = rqsize;
2173                 qhp->wq.rq.memsize =
2174                         (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
2175                         sizeof(*qhp->wq.rq.queue);
2176         }
2177
2178         if (ucontext) {
2179                 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
2180                 if (!attrs->srq)
2181                         qhp->wq.rq.memsize =
2182                                 roundup(qhp->wq.rq.memsize, PAGE_SIZE);
2183         }
2184
2185         ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
2186                         ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
2187                         qhp->wr_waitp, !attrs->srq);
2188         if (ret)
2189                 goto err_free_wr_wait;
2190
2191         attrs->cap.max_recv_wr = rqsize - 1;
2192         attrs->cap.max_send_wr = sqsize - 1;
2193         attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
2194
2195         qhp->rhp = rhp;
2196         qhp->attr.pd = php->pdid;
2197         qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
2198         qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
2199         qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
2200         qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
2201         qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
2202         if (!attrs->srq) {
2203                 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
2204                 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
2205         }
2206         qhp->attr.state = C4IW_QP_STATE_IDLE;
2207         qhp->attr.next_state = C4IW_QP_STATE_IDLE;
2208         qhp->attr.enable_rdma_read = 1;
2209         qhp->attr.enable_rdma_write = 1;
2210         qhp->attr.enable_bind = 1;
2211         qhp->attr.max_ord = 0;
2212         qhp->attr.max_ird = 0;
2213         qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
2214         spin_lock_init(&qhp->lock);
2215         mutex_init(&qhp->mutex);
2216         init_waitqueue_head(&qhp->wait);
2217         init_completion(&qhp->qp_rel_comp);
2218         refcount_set(&qhp->qp_refcnt, 1);
2219
2220         ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
2221         if (ret)
2222                 goto err_destroy_qp;
2223
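/*
 * For userspace QPs, hand back mmap keys for the SQ/RQ memory and their
 * BAR2 doorbell/GTS pages (plus the MA sync page for on-chip SQs).  Each
 * key is a per-context offset that the library later passes to mmap() to
 * map the corresponding region.
 */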
2224         if (udata && ucontext) {
2225                 sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
2226                 if (!sq_key_mm) {
2227                         ret = -ENOMEM;
2228                         goto err_remove_handle;
2229                 }
2230                 if (!attrs->srq) {
2231                         rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
2232                         if (!rq_key_mm) {
2233                                 ret = -ENOMEM;
2234                                 goto err_free_sq_key;
2235                         }
2236                 }
2237                 sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
2238                 if (!sq_db_key_mm) {
2239                         ret = -ENOMEM;
2240                         goto err_free_rq_key;
2241                 }
2242                 if (!attrs->srq) {
2243                         rq_db_key_mm =
2244                                 kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
2245                         if (!rq_db_key_mm) {
2246                                 ret = -ENOMEM;
2247                                 goto err_free_sq_db_key;
2248                         }
2249                 }
2250                 memset(&uresp, 0, sizeof(uresp));
2251                 if (t4_sq_onchip(&qhp->wq.sq)) {
2252                         ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
2253                                                  GFP_KERNEL);
2254                         if (!ma_sync_key_mm) {
2255                                 ret = -ENOMEM;
2256                                 goto err_free_rq_db_key;
2257                         }
2258                         uresp.flags = C4IW_QPF_ONCHIP;
2259                 }
2260                 if (rhp->rdev.lldi.write_w_imm_support)
2261                         uresp.flags |= C4IW_QPF_WRITE_W_IMM;
2262                 uresp.qid_mask = rhp->rdev.qpmask;
2263                 uresp.sqid = qhp->wq.sq.qid;
2264                 uresp.sq_size = qhp->wq.sq.size;
2265                 uresp.sq_memsize = qhp->wq.sq.memsize;
2266                 if (!attrs->srq) {
2267                         uresp.rqid = qhp->wq.rq.qid;
2268                         uresp.rq_size = qhp->wq.rq.size;
2269                         uresp.rq_memsize = qhp->wq.rq.memsize;
2270                 }
2271                 spin_lock(&ucontext->mmap_lock);
2272                 if (ma_sync_key_mm) {
2273                         uresp.ma_sync_key = ucontext->key;
2274                         ucontext->key += PAGE_SIZE;
2275                 }
2276                 uresp.sq_key = ucontext->key;
2277                 ucontext->key += PAGE_SIZE;
2278                 if (!attrs->srq) {
2279                         uresp.rq_key = ucontext->key;
2280                         ucontext->key += PAGE_SIZE;
2281                 }
2282                 uresp.sq_db_gts_key = ucontext->key;
2283                 ucontext->key += PAGE_SIZE;
2284                 if (!attrs->srq) {
2285                         uresp.rq_db_gts_key = ucontext->key;
2286                         ucontext->key += PAGE_SIZE;
2287                 }
2288                 spin_unlock(&ucontext->mmap_lock);
2289                 ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
2290                 if (ret)
2291                         goto err_free_ma_sync_key;
2292                 sq_key_mm->key = uresp.sq_key;
2293                 sq_key_mm->addr = qhp->wq.sq.phys_addr;
2294                 sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
2295                 insert_mmap(ucontext, sq_key_mm);
2296                 if (!attrs->srq) {
2297                         rq_key_mm->key = uresp.rq_key;
2298                         rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
2299                         rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
2300                         insert_mmap(ucontext, rq_key_mm);
2301                 }
2302                 sq_db_key_mm->key = uresp.sq_db_gts_key;
2303                 sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
2304                 sq_db_key_mm->len = PAGE_SIZE;
2305                 insert_mmap(ucontext, sq_db_key_mm);
2306                 if (!attrs->srq) {
2307                         rq_db_key_mm->key = uresp.rq_db_gts_key;
2308                         rq_db_key_mm->addr =
2309                                 (u64)(unsigned long)qhp->wq.rq.bar2_pa;
2310                         rq_db_key_mm->len = PAGE_SIZE;
2311                         insert_mmap(ucontext, rq_db_key_mm);
2312                 }
2313                 if (ma_sync_key_mm) {
2314                         ma_sync_key_mm->key = uresp.ma_sync_key;
2315                         ma_sync_key_mm->addr =
2316                                 (pci_resource_start(rhp->rdev.lldi.pdev, 0) +
2317                                 PCIE_MA_SYNC_A) & PAGE_MASK;
2318                         ma_sync_key_mm->len = PAGE_SIZE;
2319                         insert_mmap(ucontext, ma_sync_key_mm);
2320                 }
2321
2322                 qhp->ucontext = ucontext;
2323         }
2324         if (!attrs->srq) {
2325                 qhp->wq.qp_errp =
2326                         &qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err;
2327         } else {
2328                 qhp->wq.qp_errp =
2329                         &qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err;
2330                 qhp->wq.srqidxp =
2331                         &qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx;
2332         }
2333
2334         qhp->ibqp.qp_num = qhp->wq.sq.qid;
2335         if (attrs->srq)
2336                 qhp->srq = to_c4iw_srq(attrs->srq);
2337         INIT_LIST_HEAD(&qhp->db_fc_entry);
2338         pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
2339                  qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
2340                  attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
2341                  qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
2342         return &qhp->ibqp;
2343 err_free_ma_sync_key:
2344         kfree(ma_sync_key_mm);
2345 err_free_rq_db_key:
2346         if (!attrs->srq)
2347                 kfree(rq_db_key_mm);
2348 err_free_sq_db_key:
2349         kfree(sq_db_key_mm);
2350 err_free_rq_key:
2351         if (!attrs->srq)
2352                 kfree(rq_key_mm);
2353 err_free_sq_key:
2354         kfree(sq_key_mm);
2355 err_remove_handle:
2356         xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
2357 err_destroy_qp:
2358         destroy_qp(&rhp->rdev, &qhp->wq,
2359                    ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
2360 err_free_wr_wait:
2361         c4iw_put_wr_wait(qhp->wr_waitp);
2362 err_free_qhp:
2363         kfree(qhp);
2364         return ERR_PTR(ret);
2365 }
2366
2367 int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2368                       int attr_mask, struct ib_udata *udata)
2369 {
2370         struct c4iw_dev *rhp;
2371         struct c4iw_qp *qhp;
2372         enum c4iw_qp_attr_mask mask = 0;
2373         struct c4iw_qp_attributes attrs = {};
2374
2375         pr_debug("ib_qp %p\n", ibqp);
2376
2377         /* iwarp does not support the RTR state */
2378         if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
2379                 attr_mask &= ~IB_QP_STATE;
2380
2381         /* Make sure we still have something left to do */
2382         if (!attr_mask)
2383                 return 0;
2384
2385         qhp = to_c4iw_qp(ibqp);
2386         rhp = qhp->rhp;
2387
2388         attrs.next_state = c4iw_convert_state(attr->qp_state);
2389         attrs.enable_rdma_read = (attr->qp_access_flags &
2390                                IB_ACCESS_REMOTE_READ) ?  1 : 0;
2391         attrs.enable_rdma_write = (attr->qp_access_flags &
2392                                 IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2393         attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
2394
2395
2396         mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
2397         mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
2398                         (C4IW_QP_ATTR_ENABLE_RDMA_READ |
2399                          C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
2400                          C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
2401
2402         /*
2403          * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
2404          * ringing the queue db when we're in DB_FULL mode.
2405          * Only allow this on T4 devices.
2406          */
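	/*
	 * Hypothetical userspace sketch (not part of this driver): after a
	 * DB_FULL event on a T4, a provider library could re-arm a
	 * kernel-managed doorbell by passing the index increment through
	 * the PSN field, e.g.:
	 *
	 *	struct ibv_qp_attr attr = { .sq_psn = idx_inc };
	 *
	 *	ibv_modify_qp(qp, &attr, IBV_QP_SQ_PSN);
	 */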
2407         attrs.sq_db_inc = attr->sq_psn;
2408         attrs.rq_db_inc = attr->rq_psn;
2409         mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
2410         mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
2411         if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
2412             (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
2413                 return -EINVAL;
2414
2415         return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
2416 }
2417
2418 struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
2419 {
2420         pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn);
2421         return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
2422 }
2423
2424 void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq)
2425 {
2426         struct ib_event event = {};
2427
2428         event.device = &srq->rhp->ibdev;
2429         event.element.srq = &srq->ibsrq;
2430         event.event = IB_EVENT_SRQ_LIMIT_REACHED;
2431         ib_dispatch_event(&event);
2432 }
2433
2434 int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
2435                     enum ib_srq_attr_mask srq_attr_mask,
2436                     struct ib_udata *udata)
2437 {
2438         struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
2439         int ret = 0;
2440
2441         /*
2442          * XXX 0 mask == a SW interrupt for srq_limit reached...
2443          */
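	/*
	 * Hypothetical userspace sketch (not part of this driver): a
	 * provider library could trigger this path with a zero attribute
	 * mask, e.g.:
	 *
	 *	struct ibv_srq_attr attr = {};
	 *
	 *	ibv_modify_srq(srq, &attr, 0);
	 */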
2444         if (udata && !srq_attr_mask) {
2445                 c4iw_dispatch_srq_limit_reached_event(srq);
2446                 goto out;
2447         }
2448
2449         /* no support for this yet */
2450         if (srq_attr_mask & IB_SRQ_MAX_WR) {
2451                 ret = -EINVAL;
2452                 goto out;
2453         }
2454
2455         if (!udata && (srq_attr_mask & IB_SRQ_LIMIT)) {
2456                 srq->armed = true;
2457                 srq->srq_limit = attr->srq_limit;
2458         }
2459 out:
2460         return ret;
2461 }
2462
2463 int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2464                      int attr_mask, struct ib_qp_init_attr *init_attr)
2465 {
2466         struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
2467
2468         memset(attr, 0, sizeof(*attr));
2469         memset(init_attr, 0, sizeof(*init_attr));
2470         attr->qp_state = to_ib_qp_state(qhp->attr.state);
2471         init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
2472         init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
2473         init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
2474         init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
2475         init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
2476         init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
2477         return 0;
2478 }
2479
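/*
 * Tear down an SRQ: issue a FW_RI_RES_WR RESET for the SRQ's egress queue,
 * then release the DMA memory, RQT entries, software ring and qid.
 */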
2480 static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
2481                            struct c4iw_wr_wait *wr_waitp)
2482 {
2483         struct c4iw_rdev *rdev = &srq->rhp->rdev;
2484         struct sk_buff *skb = srq->destroy_skb;
2485         struct t4_srq *wq = &srq->wq;
2486         struct fw_ri_res_wr *res_wr;
2487         struct fw_ri_res *res;
2488         int wr_len;
2489
2490         wr_len = sizeof(*res_wr) + sizeof(*res);
2491         set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
2492
2493         res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
2494         memset(res_wr, 0, wr_len);
2495         res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
2496                         FW_RI_RES_WR_NRES_V(1) |
2497                         FW_WR_COMPL_F);
2498         res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
2499         res_wr->cookie = (uintptr_t)wr_waitp;
2500         res = res_wr->res;
2501         res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
2502         res->u.srq.op = FW_RI_RES_OP_RESET;
2503         res->u.srq.srqid = cpu_to_be32(srq->idx);
2504         res->u.srq.eqid = cpu_to_be32(wq->qid);
2505
2506         c4iw_init_wr_wait(wr_waitp);
2507         c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
2508
2509         dma_free_coherent(&rdev->lldi.pdev->dev,
2510                           wq->memsize, wq->queue,
2511                         dma_unmap_addr(wq, mapping));
2512         c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
2513         kfree(wq->sw_rq);
2514         c4iw_put_qpid(rdev, wq->qid, uctx);
2515 }
2516
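/*
 * Allocate the SRQ hardware resources: a qid, the software rings (kernel
 * SRQs only), RQT entries, DMA queue memory and the BAR2 mapping, then
 * issue a FW_RI_RES_WR WRITE to create the queue in the firmware.
 */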
2517 static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
2518                            struct c4iw_wr_wait *wr_waitp)
2519 {
2520         struct c4iw_rdev *rdev = &srq->rhp->rdev;
2521         int user = (uctx != &rdev->uctx);
2522         struct t4_srq *wq = &srq->wq;
2523         struct fw_ri_res_wr *res_wr;
2524         struct fw_ri_res *res;
2525         struct sk_buff *skb;
2526         int wr_len;
2527         int eqsize;
2528         int ret = -ENOMEM;
2529
2530         wq->qid = c4iw_get_qpid(rdev, uctx);
2531         if (!wq->qid)
2532                 goto err;
2533
2534         if (!user) {
2535                 wq->sw_rq = kcalloc(wq->size, sizeof(*wq->sw_rq),
2536                                     GFP_KERNEL);
2537                 if (!wq->sw_rq)
2538                         goto err_put_qpid;
2539                 wq->pending_wrs = kcalloc(srq->wq.size,
2540                                           sizeof(*srq->wq.pending_wrs),
2541                                           GFP_KERNEL);
2542                 if (!wq->pending_wrs)
2543                         goto err_free_sw_rq;
2544         }
2545
2546         wq->rqt_size = wq->size;
2547         wq->rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rqt_size);
2548         if (!wq->rqt_hwaddr)
2549                 goto err_free_pending_wrs;
2550         wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
2551                 T4_RQT_ENTRY_SHIFT;
2552
2553         wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
2554                                        &wq->dma_addr, GFP_KERNEL);
2555         if (!wq->queue)
2556                 goto err_free_rqtpool;
2557
2558         dma_unmap_addr_set(wq, mapping, wq->dma_addr);
2559
2560         wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS,
2561                                       &wq->bar2_qid,
2562                         user ? &wq->bar2_pa : NULL);
2563
2564         /*
2565          * User mode must have bar2 access.
2566          */
2567
2568         if (user && !wq->bar2_va) {
2569                 pr_warn(MOD "%s: srqid %u not in BAR2 range.\n",
2570                         pci_name(rdev->lldi.pdev), wq->qid);
2571                 ret = -EINVAL;
2572                 goto err_free_queue;
2573         }
2574
2575         /* build fw_ri_res_wr */
2576         wr_len = sizeof(*res_wr) + sizeof(*res);
2577
2578         skb = alloc_skb(wr_len, GFP_KERNEL);
2579         if (!skb)
2580                 goto err_free_queue;
2581         set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
2582
2583         res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
2584         memset(res_wr, 0, wr_len);
2585         res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
2586                         FW_RI_RES_WR_NRES_V(1) |
2587                         FW_WR_COMPL_F);
2588         res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
2589         res_wr->cookie = (uintptr_t)wr_waitp;
2590         res = res_wr->res;
2591         res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
2592         res->u.srq.op = FW_RI_RES_OP_WRITE;
2593
2594         /*
2595          * eqsize is the number of 64B entries plus the status page size.
2596          */
2597         eqsize = wq->size * T4_RQ_NUM_SLOTS +
2598                 rdev->hw_queue.t4_eq_status_entries;
2599         res->u.srq.eqid = cpu_to_be32(wq->qid);
2600         res->u.srq.fetchszm_to_iqid =
2601                                                 /* no host cidx updates */
2602                 cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
2603                 FW_RI_RES_WR_CPRIO_V(0) |       /* don't keep in chip cache */
2604                 FW_RI_RES_WR_PCIECHN_V(0) |     /* set by uP at ri_init time */
2605                 FW_RI_RES_WR_FETCHRO_V(0));     /* relaxed_ordering */
2606         res->u.srq.dcaen_to_eqsize =
2607                 cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
2608                 FW_RI_RES_WR_DCACPU_V(0) |
2609                 FW_RI_RES_WR_FBMIN_V(2) |
2610                 FW_RI_RES_WR_FBMAX_V(3) |
2611                 FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
2612                 FW_RI_RES_WR_CIDXFTHRESH_V(0) |
2613                 FW_RI_RES_WR_EQSIZE_V(eqsize));
2614         res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr);
2615         res->u.srq.srqid = cpu_to_be32(srq->idx);
2616         res->u.srq.pdid = cpu_to_be32(srq->pdid);
2617         res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size);
2618         res->u.srq.hwsrqaddr = cpu_to_be32(wq->rqt_hwaddr -
2619                         rdev->lldi.vr->rq.start);
2620
2621         c4iw_init_wr_wait(wr_waitp);
2622
2623         ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->qid, __func__);
2624         if (ret)
2625                 goto err_free_queue;
2626
2627         pr_debug("%s srq %u eqid %u pdid %u queue va %p pa 0x%llx\n"
2628                         " bar2_addr %p rqt addr 0x%x size %d\n",
2629                         __func__, srq->idx, wq->qid, srq->pdid, wq->queue,
2630                         (u64)virt_to_phys(wq->queue), wq->bar2_va,
2631                         wq->rqt_hwaddr, wq->rqt_size);
2632
2633         return 0;
2634 err_free_queue:
2635         dma_free_coherent(&rdev->lldi.pdev->dev,
2636                           wq->memsize, wq->queue,
2637                         dma_unmap_addr(wq, mapping));
2638 err_free_rqtpool:
2639         c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
2640 err_free_pending_wrs:
2641         if (!user)
2642                 kfree(wq->pending_wrs);
2643 err_free_sw_rq:
2644         if (!user)
2645                 kfree(wq->sw_rq);
2646 err_put_qpid:
2647         c4iw_put_qpid(rdev, wq->qid, uctx);
2648 err:
2649         return ret;
2650 }
2651
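/*
 * Copy a recv WR into the SRQ hardware queue 16 bytes (two u64s) at a time,
 * wrapping at the end of the queue since the WR may straddle it.
 */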
2652 void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16)
2653 {
2654         u64 *src, *dst;
2655
2656         src = (u64 *)wqe;
2657         dst = (u64 *)((u8 *)srq->queue + srq->wq_pidx * T4_EQ_ENTRY_SIZE);
2658         while (len16) {
2659                 *dst++ = *src++;
2660                 if (dst >= (u64 *)&srq->queue[srq->size])
2661                         dst = (u64 *)srq->queue;
2662                 *dst++ = *src++;
2663                 if (dst >= (u64 *)&srq->queue[srq->size])
2664                         dst = (u64 *)srq->queue;
2665                 len16--;
2666         }
2667 }
2668
2669 int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
2670                                struct ib_udata *udata)
2671 {
2672         struct ib_pd *pd = ib_srq->pd;
2673         struct c4iw_dev *rhp;
2674         struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
2675         struct c4iw_pd *php;
2676         struct c4iw_create_srq_resp uresp;
2677         struct c4iw_ucontext *ucontext;
2678         struct c4iw_mm_entry *srq_key_mm, *srq_db_key_mm;
2679         int rqsize;
2680         int ret;
2681         int wr_len;
2682
2683         pr_debug("%s ib_pd %p\n", __func__, pd);
2684
2685         php = to_c4iw_pd(pd);
2686         rhp = php->rhp;
2687
2688         if (!rhp->rdev.lldi.vr->srq.size)
2689                 return -EINVAL;
2690         if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size)
2691                 return -E2BIG;
2692         if (attrs->attr.max_sge > T4_MAX_RECV_SGE)
2693                 return -E2BIG;
2694
2695         /*
2696          * SRQ RQT and RQ must be a power of 2 and at least 16 deep.
2697          */
2698         rqsize = attrs->attr.max_wr + 1;
2699         rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16));
2700
2701         ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
2702                                              ibucontext);
2703
2704         srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
2705         if (!srq->wr_waitp)
2706                 return -ENOMEM;
2707
2708         srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
2709         if (srq->idx < 0) {
2710                 ret = -ENOMEM;
2711                 goto err_free_wr_wait;
2712         }
2713
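             /*
              * Pre-allocate the skb for the destroy-time firmware work
              * request so that SRQ teardown does not need to allocate
              * memory.
              */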
2714         wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
2715         srq->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
2716         if (!srq->destroy_skb) {
2717                 ret = -ENOMEM;
2718                 goto err_free_srq_idx;
2719         }
2720
2721         srq->rhp = rhp;
2722         srq->pdid = php->pdid;
2723
2724         srq->wq.size = rqsize;
2725         srq->wq.memsize =
2726                 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
2727                 sizeof(*srq->wq.queue);
2728         if (ucontext)
2729                 srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE);
2730
2731         ret = alloc_srq_queue(srq, ucontext ? &ucontext->uctx :
2732                         &rhp->rdev.uctx, srq->wr_waitp);
2733         if (ret)
2734                 goto err_free_skb;
2735         attrs->attr.max_wr = rqsize - 1;        /* report the adjusted max_wr back to the caller */
2736
2737         if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
2738                 srq->flags = T4_SRQ_LIMIT_SUPPORT;
2739
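             /*
              * For user-mode SRQs, hand back two mmap keys: one for
              * the queue memory itself and one for the BAR2
              * doorbell/GTS page.
              */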
2740         if (udata) {
2741                 srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL);
2742                 if (!srq_key_mm) {
2743                         ret = -ENOMEM;
2744                         goto err_free_queue;
2745                 }
2746                 srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL);
2747                 if (!srq_db_key_mm) {
2748                         ret = -ENOMEM;
2749                         goto err_free_srq_key_mm;
2750                 }
2751                 memset(&uresp, 0, sizeof(uresp));
2752                 uresp.flags = srq->flags;
2753                 uresp.qid_mask = rhp->rdev.qpmask;
2754                 uresp.srqid = srq->wq.qid;
2755                 uresp.srq_size = srq->wq.size;
2756                 uresp.srq_memsize = srq->wq.memsize;
2757                 uresp.rqt_abs_idx = srq->wq.rqt_abs_idx;
2758                 spin_lock(&ucontext->mmap_lock);
2759                 uresp.srq_key = ucontext->key;
2760                 ucontext->key += PAGE_SIZE;
2761                 uresp.srq_db_gts_key = ucontext->key;
2762                 ucontext->key += PAGE_SIZE;
2763                 spin_unlock(&ucontext->mmap_lock);
2764                 ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
2765                 if (ret)
2766                         goto err_free_srq_db_key_mm;
2767                 srq_key_mm->key = uresp.srq_key;
2768                 srq_key_mm->addr = virt_to_phys(srq->wq.queue);
2769                 srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize);
2770                 insert_mmap(ucontext, srq_key_mm);
2771                 srq_db_key_mm->key = uresp.srq_db_gts_key;
2772                 srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa;
2773                 srq_db_key_mm->len = PAGE_SIZE;
2774                 insert_mmap(ucontext, srq_db_key_mm);
2775         }
2776
2777         pr_debug("%s srq qid %u idx %u size %u memsize %lu num_entries %u\n",
2778                  __func__, srq->wq.qid, srq->idx, srq->wq.size,
2779                  (unsigned long)srq->wq.memsize, attrs->attr.max_wr);
2780
2781         spin_lock_init(&srq->lock);
2782         return 0;
2783
2784 err_free_srq_db_key_mm:
2785         kfree(srq_db_key_mm);
2786 err_free_srq_key_mm:
2787         kfree(srq_key_mm);
2788 err_free_queue:
2789         free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
2790                        srq->wr_waitp);
2791 err_free_skb:
2792         kfree_skb(srq->destroy_skb);
2793 err_free_srq_idx:
2794         c4iw_free_srq_idx(&rhp->rdev, srq->idx);
2795 err_free_wr_wait:
2796         c4iw_put_wr_wait(srq->wr_waitp);
2797         return ret;
2798 }
2799
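     /*
      * Destroy an SRQ.  This mirrors the create path: free the
      * hardware queue, release the SRQ index and drop the wr_wait
      * reference.
      */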
2800 int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
2801 {
2802         struct c4iw_dev *rhp;
2803         struct c4iw_srq *srq;
2804         struct c4iw_ucontext *ucontext;
2805
2806         srq = to_c4iw_srq(ibsrq);
2807         rhp = srq->rhp;
2808
2809         pr_debug("%s id %d\n", __func__, srq->wq.qid);
2810         ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
2811                                              ibucontext);
2812         free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
2813                        srq->wr_waitp);
2814         c4iw_free_srq_idx(&rhp->rdev, srq->idx);
2815         c4iw_put_wr_wait(srq->wr_waitp);
2816         return 0;
2817 }