IB: split struct ib_send_wr
[linux-2.6-block.git] / drivers/infiniband/hw/cxgb4/qp.c
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include "iw_cxgb4.h"

static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold,
		 "QP count/threshold that triggers"
		 " automatic db flow control mode (default = 1000)");

int db_coalescing_threshold;
module_param(db_coalescing_threshold, int, 0644);
MODULE_PARM_DESC(db_coalescing_threshold,
		 "QP count/threshold that triggers"
		 " disabling db coalescing (default = 0)");

static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");

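/*
 * IRD (inbound RDMA read) resources are a single device-wide pool:
 * alloc_ird()/free_ird() charge and refund a QP's share under dev->lock.
 */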
static int alloc_ird(struct c4iw_dev *dev, u32 ird)
{
	int ret = 0;

	spin_lock_irq(&dev->lock);
	if (ird <= dev->avail_ird)
		dev->avail_ird -= ird;
	else
		ret = -ENOMEM;
	spin_unlock_irq(&dev->lock);

	if (ret)
		dev_warn(&dev->rdev.lldi.pdev->dev,
			 "device IRD resources exhausted\n");

	return ret;
}

static void free_ird(struct c4iw_dev *dev, int ird)
{
	spin_lock_irq(&dev->lock);
	dev->avail_ird += ird;
	spin_unlock_irq(&dev->lock);
}

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;
	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}

static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  pci_unmap_addr(sq, mapping));
}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (t4_sq_onchip(sq))
		dealloc_oc_sq(rdev, sq);
	else
		dealloc_host_sq(rdev, sq);
}

static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (!ocqp_support || !ocqp_supported(&rdev->lldi))
		return -ENOSYS;
	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
	if (!sq->dma_addr)
		return -ENOMEM;
	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
			rdev->lldi.vr->ocq.start;
	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
					    rdev->lldi.vr->ocq.start);
	sq->flags |= T4_SQ_ONCHIP;
	return 0;
}

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
				       &(sq->dma_addr), GFP_KERNEL);
	if (!sq->queue)
		return -ENOMEM;
	sq->phys_addr = virt_to_phys(sq->queue);
	pci_unmap_addr_set(sq, mapping, sq->dma_addr);
	return 0;
}

static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
{
	int ret = -ENOSYS;
	if (user)
		ret = alloc_oc_sq(rdev, sq);
	if (ret)
		ret = alloc_host_sq(rdev, sq);
	return ret;
}

static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}

/*
 * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL,
 * then this is a user mapping so compute the page-aligned physical address
 * for mapping.
 */
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
			      enum cxgb4_bar2_qtype qtype,
			      unsigned int *pbar2_qid, u64 *pbar2_pa)
{
	u64 bar2_qoffset;
	int ret;

	ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype,
				   pbar2_pa ? 1 : 0,
				   &bar2_qoffset, pbar2_qid);
	if (ret)
		return NULL;

	if (pbar2_pa)
		*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
	return rdev->bar2_kva + bar2_qoffset;
}

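/*
 * create_qp() allocates SQ/RQ qids (plus, for kernel QPs, the software
 * shadow rings), carves out an RQT region, DMA-maps both queues, and then
 * posts a single FW_RI_RES_WR carrying two resource entries (one per EQ),
 * blocking until firmware acks the work request.
 */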
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret = 0;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				       GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				       GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2 and at least 16 deep.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}

	ret = alloc_sq(rdev, &wq->sq, user);
	if (ret)
		goto free_hwaddr;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq;
	}
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
	     __func__, wq->sq.queue,
	     (unsigned long long)virt_to_phys(wq->sq.queue),
	     wq->rq.queue,
	     (unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;

	wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS,
					 &wq->sq.bar2_qid,
					 user ? &wq->sq.bar2_pa : NULL);
	wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, T4_BAR2_QTYPE_EGRESS,
					 &wq->rq.bar2_qid,
					 user ? &wq->rq.bar2_pa : NULL);

	/*
	 * User mode must have bar2 access.
	 */
	if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
		pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
			pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
		ret = -EINVAL;
		goto free_dma;
	}

	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(2) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)&wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
		rdev->hw_queue.t4_eq_status_entries;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		FW_RI_RES_WR_HOSTFCMODE_V(0) |	/* no host cidx updates */
		FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
		FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
		FW_RI_RES_WR_IQID_V(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		FW_RI_RES_WR_DCAEN_V(0) |
		FW_RI_RES_WR_DCACPU_V(0) |
		FW_RI_RES_WR_FBMIN_V(2) |
		FW_RI_RES_WR_FBMAX_V(2) |
		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
		FW_RI_RES_WR_EQSIZE_V(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
		rdev->hw_queue.t4_eq_status_entries;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		FW_RI_RES_WR_HOSTFCMODE_V(0) |	/* no host cidx updates */
		FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
		FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
		FW_RI_RES_WR_IQID_V(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		FW_RI_RES_WR_DCAEN_V(0) |
		FW_RI_RES_WR_DCACPU_V(0) |
		FW_RI_RES_WR_FBMIN_V(2) |
		FW_RI_RES_WR_FBMAX_V(2) |
		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
		FW_RI_RES_WR_EQSIZE_V(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto free_dma;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     wq->sq.bar2_va, wq->rq.bar2_va);

	return 0;
free_dma:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}

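/*
 * Copy immediate (inline) send data from the WR's SG list directly into
 * the WQE, wrapping back to the start of the SQ if the copy runs off the
 * end of the queue, then pad the result out to a 16B boundary.
 */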
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}

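/*
 * Build an immediate scatter/gather list in the WQE from an ib_sge
 * array, with the same wrap-at-queue-end handling: each SGE becomes a
 * 64-bit lkey/length flit followed by a 64-bit address flit.
 */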
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)

{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				     sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}

static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;

	default:
		return -EINVAL;
	}
	wqe->send.r3 = 0;
	wqe->send.r4 = 0;

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
	wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							>> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
			 struct ib_send_wr *send_wr, u8 *len16, u8 t5dev)
{
	struct ib_fast_reg_wr *wr = fast_reg_wr(send_wr);
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(wr->page_list_len * sizeof(u64), 32);
	int rem;

	if (wr->page_list_len > t4_max_fr_depth(use_dsgl))
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access_flags);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(wr->length);
	wqe->fr.stag = cpu_to_be32(wr->rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->iova_start & 0xffffffff);

	if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
		struct c4iw_fr_page_list *c4pl =
			to_c4iw_fr_page_list(wr->page_list);
		struct fw_ri_dsgl *sglp;

		for (i = 0; i < wr->page_list_len; i++) {
			wr->page_list->page_list[i] = (__force u64)
				cpu_to_be64((u64)wr->page_list->page_list[i]);
		}

		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->r1 = 0;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
		sglp->len0 = cpu_to_be32(pbllen);

		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
	} else {
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->r1 = 0;
		imdp->r2 = 0;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		rem = pbllen;
		for (i = 0; i < wr->page_list_len; i++) {
			*p = cpu_to_be64((u64)wr->page_list->page_list[i]);
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		BUG_ON(rem < 0);
		while (rem) {
			*p = 0;
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
				      + pbllen, 16);
	}
	return 0;
}

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));
}

static void add_to_fc_list(struct list_head *head, struct list_head *entry)
{
	if (list_empty(entry))
		list_add_tail(entry, head);
}

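/*
 * While doorbells are flow-controlled (db_state != NORMAL), kernel QPs
 * don't write BAR2 directly: the QP is queued on db_fc_list and the
 * pending producer-index increment is accumulated for later replay.
 */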
static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
{
	unsigned long flags;

	spin_lock_irqsave(&qhp->rhp->lock, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL)
		t4_ring_sq_db(&qhp->wq, inc,
			      is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL);
	else {
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.sq.wq_pidx_inc += inc;
	}
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
	return 0;
}

static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
{
	unsigned long flags;

	spin_lock_irqsave(&qhp->rhp->lock, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL)
		t4_ring_rq_db(&qhp->wq, inc,
			      is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL);
	else {
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.rq.wq_pidx_inc += inc;
	}
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
	return 0;
}

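/*
 * Post a chain of send work requests.  Under the QP lock, each WR is
 * translated into a T4 WQE at the current SQ producer index; the
 * doorbell is rung once for the whole chain, or deferred through
 * ring_kernel_sq_db() when doorbells are flow-controlled.
 */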
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe = NULL;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			else
				fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
					    is_t5(
					    qhp->rhp->rdev.lldi.adapter_type) ?
					    1 : 0);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
				  qhp->sq_sig_all;
		swsqe->flushed = 0;
		swsqe->wr_id = wr->wr_id;
		if (c4iw_wr_log) {
			swsqe->sge_ts = cxgb4_read_sge_timestamp(
					qhp->rhp->rdev.lldi.ports[0]);
			getnstimeofday(&swsqe->host_ts);
		}

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}
	if (!qhp->rhp->rdev.status_page->db_off) {
		t4_ring_sq_db(&qhp->wq, idx,
			      is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
		spin_unlock_irqrestore(&qhp->lock, flag);
	} else {
		spin_unlock_irqrestore(&qhp->lock, flag);
		ring_kernel_sq_db(qhp, idx);
	}
	return err;
}

int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe = NULL;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
					   qhp->wq.rq.wq_pidx *
					   T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
		if (c4iw_wr_log) {
			qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
				cxgb4_read_sge_timestamp(
						qhp->rhp->rdev.lldi.ports[0]);
			getnstimeofday(
				&qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts);
		}

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}
	if (!qhp->rhp->rdev.status_page->db_off) {
		t4_ring_rq_db(&qhp->wq, idx,
			      is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
		spin_unlock_irqrestore(&qhp->lock, flag);
	} else {
		spin_unlock_irqrestore(&qhp->lock, flag);
		ring_kernel_rq_db(qhp, idx);
	}
	return err;
}

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
	return -ENOSYS;
}

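/*
 * Map a hardware error CQE onto the iWARP TERMINATE layer/etype and
 * error code to report to the peer.  With no CQE at all, the failure is
 * reported as a local catastrophic RDMAP error.
 */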
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, gfp);
	if (!skb)
		return;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(qhp->ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
	} else
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp)
{
	int count;
	int rq_flushed, sq_flushed;
	unsigned long flag;

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);

	if (qhp->wq.flushed) {
		spin_unlock(&qhp->lock);
		spin_unlock_irqrestore(&rchp->lock, flag);
		return;
	}
	qhp->wq.flushed = 1;

	c4iw_flush_hw_cq(rchp);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, flag);
	spin_lock(&qhp->lock);
	if (schp != rchp)
		c4iw_flush_hw_cq(schp);
	sq_flushed = c4iw_flush_sq(qhp);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, flag);

	if (schp == rchp) {
		if (t4_clear_cq_armed(&rchp->cq) &&
		    (rq_flushed || sq_flushed)) {
			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
						   rchp->ibcq.cq_context);
			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		}
	} else {
		if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
						   rchp->ibcq.cq_context);
			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		}
		if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
						   schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
	}
}

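/*
 * For user QPs only the error state is recorded here (the queues are
 * flushed from user mode), so mark the wq and both CQs in error and run
 * the completion handlers; kernel QPs get a full flush via __flush_qp().
 */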
static void flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;
	unsigned long flag;

	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	schp = to_c4iw_cq(qhp->ibqp.send_cq);

	t4_set_wq_in_error(&qhp->wq);
	if (qhp->ibqp.uobject) {
		t4_set_cq_in_error(&rchp->cq);
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		if (schp != rchp) {
			t4_set_cq_in_error(&schp->cq);
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
					schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
		return;
	}
	__flush_qp(qhp, rchp, schp);
}

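/*
 * rdma_fini()/rdma_init() post FW_RI_INIT_WR work requests that take the
 * connection out of (FINI) or into (INIT) RDMA mode, then block on the
 * endpoint's wr_wait until firmware replies.
 */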
static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		     struct c4iw_ep *ep)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP_V(FW_RI_INIT_WR) |
		FW_WR_COMPL_F);
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
	wqe->cookie = (uintptr_t)&ep->com.wr_wait;

	wqe->u.fini.type = FW_RI_TYPE_FINI;
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
				  qhp->wq.sq.qid, __func__);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	PDBG("%s p2p_type = %d\n", __func__, p2p_type);
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
						   sizeof(struct fw_ri_immd),
						   16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}

static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
	     qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}
	ret = alloc_ird(rhp, qhp->attr.max_ird);
	if (ret) {
		qhp->attr.max_ird = 0;
		kfree_skb(skb);
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP_V(FW_RI_INIT_WR) |
		FW_WR_COMPL_F);
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(qhp->ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));

	wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
		FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
		FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
			    FW_RI_QP_RDMA_WRITE_ENABLE |
			    FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
				     FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
					   rhp->rdev.lldi.vr->rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto err1;

	ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
				  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
	if (!ret)
		goto out;
err1:
	free_ird(rhp, qhp->attr.max_ird);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

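/*
 * c4iw_modify_qp() drives the iWARP QP state machine under qhp->mutex:
 * IDLE->RTS posts an RDMA INIT, RTS->CLOSING posts a FINI, RTS->TERMINATE
 * may send a TERMINATE message, and any transition into ERROR flushes the
 * queues and disassociates the LLP endpoint.
 */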
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
	     (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > cur_max_read_depth(rhp)) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (mask & C4IW_QP_ATTR_SQ_DB) {
		ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
		goto out;
	}
	if (mask & C4IW_QP_ATTR_RQ_DB) {
		ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
		goto out;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
			ep = qhp->ep;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&qhp->ep->com);
			}
			ret = rdma_fini(rhp, qhp, ep);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_TERMINATE:
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			ep = qhp->ep;
			if (!internal) {
				c4iw_get_ep(&qhp->ep->com);
				terminate = 1;
				disconnect = 1;
			} else {
				terminate = qhp->attr.send_term;
				ret = rdma_fini(rhp, qhp, ep);
				if (ret)
					goto err;
			}
			break;
		case C4IW_QP_STATE_ERROR:
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_ERROR);
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&qhp->ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp);
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		set_state(qhp, C4IW_QP_STATE_IDLE);
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	if (!ep)
		ep = qhp->ep;
	qhp->ep = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);
	free = 1;
	abort = 1;
	BUG_ON(!ep);
	flush_qp(qhp);
	wake_up(&qhp->wait);
out:
	mutex_unlock(&qhp->mutex);

	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
				   GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);
	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}

int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;
	struct c4iw_ucontext *ucontext;

	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	else
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	spin_lock_irq(&rhp->lock);
	if (!list_empty(&qhp->db_fc_entry))
		list_del_init(&qhp->db_fc_entry);
	spin_unlock_irq(&rhp->lock);
	free_ird(rhp, qhp->attr.max_ird);

	ucontext = ib_qp->uobject ?
		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
	kfree(qhp);
	return 0;
}

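/*
 * For userspace QPs, c4iw_create_qp() returns a set of mmap keys in the
 * create response (SQ/RQ queue memory, SQ/RQ BAR2 doorbell/GTS pages and,
 * for on-chip SQs, the MA sync page); the user library is expected to
 * mmap() each key to gain direct access to the queues and doorbells.
 */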
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	unsigned int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
		return ERR_PTR(-E2BIG);
	rqsize = attrs->cap.max_recv_wr + 1;
	if (rqsize < 8)
		rqsize = 8;

	if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
		return ERR_PTR(-E2BIG);
	sqsize = attrs->cap.max_send_wr + 1;
	if (sqsize < 8)
		sqsize = 8;

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize =
		(sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
		sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
	qhp->wq.sq.flush_cidx = -1;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize =
		(rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
		sizeof(*qhp->wq.rq.queue);

	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 0;
	qhp->attr.max_ird = 0;
	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	if (ret)
		goto err2;

	if (udata) {
		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}
		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
		if (!mm3) {
			ret = -ENOMEM;
			goto err5;
		}
		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
		if (!mm4) {
			ret = -ENOMEM;
			goto err6;
		}
		if (t4_sq_onchip(&qhp->wq.sq)) {
			mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
			if (!mm5) {
				ret = -ENOMEM;
				goto err7;
			}
			uresp.flags = C4IW_QPF_ONCHIP;
		} else
			uresp.flags = 0;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		if (mm5) {
			uresp.ma_sync_key = ucontext->key;
			ucontext->key += PAGE_SIZE;
		} else {
			uresp.ma_sync_key = 0;
		}
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err8;
		mm1->key = uresp.sq_key;
		mm1->addr = qhp->wq.sq.phys_addr;
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = virt_to_phys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = (__force unsigned long)qhp->wq.sq.bar2_pa;
		mm3->len = PAGE_SIZE;
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = (__force unsigned long)qhp->wq.rq.bar2_pa;
		mm4->len = PAGE_SIZE;
		insert_mmap(ucontext, mm4);
		if (mm5) {
			mm5->key = uresp.ma_sync_key;
			mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
				    + PCIE_MA_SYNC_A) & PAGE_MASK;
			mm5->len = PAGE_SIZE;
			insert_mmap(ucontext, mm5);
		}
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	INIT_LIST_HEAD(&qhp->db_fc_entry);
	PDBG("%s sq id %u size %u memsize %zu num_entries %u "
	     "rq id %u size %u memsize %zu num_entries %u\n", __func__,
	     qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
	     attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
	     qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
	return &qhp->ibqp;
err8:
	kfree(mm5);
err7:
	kfree(mm4);
err6:
	kfree(mm3);
err5:
	kfree(mm2);
err4:
	kfree(mm1);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}

int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
			       IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(C4IW_QP_ATTR_ENABLE_RDMA_READ |
			 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
			 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	/*
	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
	 * ringing the queue db when we're in DB_FULL mode.
	 * Only allow this on T4 devices.
	 */
	attrs.sq_db_inc = attr->sq_psn;
	attrs.rq_db_inc = attr->rq_psn;
	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
	if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
	    (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
		return -EINVAL;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}

int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof *attr);
	memset(init_attr, 0, sizeof *init_attr);
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
	init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
	return 0;
}