/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include "iw_cxgb4.h"

static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold,
		 "QP count/threshold that triggers"
		 " automatic db flow control mode (default = 1000)");

int db_coalescing_threshold;
module_param(db_coalescing_threshold, int, 0644);
MODULE_PARM_DESC(db_coalescing_threshold,
		 "QP count/threshold that triggers"
		 " disabling db coalescing (default = 0)");

static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
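
/*
 * Usage sketch (values are illustrative, not a recommendation), assuming the
 * driver loads as iw_cxgb4: the parameters above can be set at load time,
 * e.g.
 *
 *	modprobe iw_cxgb4 db_fc_threshold=2000 ocqp_support=0
 *
 * and, since they are created with mode 0644, inspected or adjusted later
 * through /sys/module/iw_cxgb4/parameters/<name>.
 */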
static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);

static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  pci_unmap_addr(sq, mapping));

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
		dealloc_oc_sq(rdev, sq);
		dealloc_host_sq(rdev, sq);

static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
	if (!ocqp_support || !ocqp_supported(&rdev->lldi))
	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
			rdev->lldi.vr->ocq.start;
	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
					    rdev->lldi.vr->ocq.start);
	sq->flags |= T4_SQ_ONCHIP;
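	/*
	 * An on-chip SQ lives in adapter memory carved from the OCQP region:
	 * dma_addr is the offset handed back by the on-chip pool allocator,
	 * and the host-visible phys_addr/queue pointers are derived from the
	 * memory-window PA/KVA the LLD exposes for that region.  T4_SQ_ONCHIP
	 * lets later code (WR building, mmap of the queue to user space)
	 * treat it differently from a host-memory SQ.
	 */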
static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
				       &(sq->dma_addr), GFP_KERNEL);
	sq->phys_addr = virt_to_phys(sq->queue);
	pci_unmap_addr_set(sq, mapping, sq->dma_addr);

static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
		ret = alloc_oc_sq(rdev, sq);
		ret = alloc_host_sq(rdev, sq);

static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);

static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	struct c4iw_wr_wait wr_wait;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
	wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {

	ret = alloc_sq(rdev, &wq->sq, user);
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
	     __func__, wq->sq.queue,
	     (unsigned long long)virt_to_phys(wq->sq.queue),
	     (unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
		     (wq->sq.qid << rdev->qpshift);
	wq->sq.udb &= PAGE_MASK;
	wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
		     (wq->rq.qid << rdev->qpshift);
	wq->rq.udb &= PAGE_MASK;
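	/*
	 * The user doorbell (udb) address is the bus address of BAR2 plus
	 * the queue id shifted by the per-adapter qpshift, rounded down to a
	 * page boundary so the doorbell page can later be handed to user
	 * space via mmap (see the sq_db_gts_key/rq_db_gts_key mappings set
	 * up in c4iw_create_qp()).
	 */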
	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;
	skb = alloc_skb(wr_len, GFP_KERNEL);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
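	/*
	 * The RES WR carries &wr_wait in its cookie; when the firmware's
	 * reply arrives, the completion path wakes the waiter and
	 * c4iw_wait_for_reply() returns the status the firmware reported
	 * (or an error if the reply never came back).
	 */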
	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);

static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ib_send_wr *wr, int max, u32 *plenp)
	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->immdlen = cpu_to_be32(plen);
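	/*
	 * Immediate data is copied directly into the WR in the software SQ,
	 * wrapping from the end of the queue back to its base when needed,
	 * and the copied data is zero-padded so the immediate chunk ends on
	 * a 16-byte boundary (the unit in which WR lengths are expressed).
	 */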

static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
		if (++flitp == queue_end)
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->nsge = cpu_to_be16(num_sge);
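	/*
	 * Each ib_sge becomes two 64-bit flits in the WR: the first packs
	 * the lkey into the upper 32 bits and the length into the lower 32,
	 * the second carries the address.  The flit pointer wraps from
	 * queue_end back to the start of the queue, so an ISGL may straddle
	 * the end of the ring, and the overflow check above rejects SGE
	 * lists whose total length would wrap a u32.
	 */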

static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ib_send_wr *wr, u8 *len16)
	if (wr->num_sge > T4_MAX_SEND_SGE)
	switch (wr->opcode) {
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);

		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);

static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ib_send_wr *wr, u8 *len16)
	if (wr->num_sge > T4_MAX_SEND_SGE)
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);

static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
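	/*
	 * The second branch above handles the no-SGE case: rather than
	 * rejecting the work request, a zero-length read is built using a
	 * dummy STAG value of 2 for both source and sink and zeroed
	 * addresses/length.
	 */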

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);

static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
			 struct ib_send_wr *wr, u8 *len16, u8 t5dev)
	struct fw_ri_immd *imdp;
	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);

	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
	if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
		struct c4iw_fr_page_list *c4pl =
			to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
		struct fw_ri_dsgl *sglp;

		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
			wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
				wr->wr.fast_reg.page_list->page_list[i]);

		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
		sglp->len0 = cpu_to_be32(pbllen);
		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
				(u64)wr->wr.fast_reg.page_list->page_list[i]);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);

void c4iw_qp_add_ref(struct ib_qp *qp)
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));

void c4iw_qp_rem_ref(struct ib_qp *qp)
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));

int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct t4_swsqe *swsqe;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
	num_wrs = t4_sq_avail(&qhp->wq);
		spin_unlock_irqrestore(&qhp->lock, flag);
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			err = build_rdma_read(wqe, wr, &len16);
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
					    qhp->rhp->rdev.lldi.adapter_type) ?
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			PDBG("%s post of type=%d TBD!\n", __func__,
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode, swsqe->read_len);
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_sq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
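	/*
	 * Each WR is built in place in the software SQ at the current
	 * producer index; t4_sq_produce() advances the index by len16
	 * (16-byte units) and idx accumulates the number of 64-byte EQ
	 * slots consumed.  A single doorbell ring at the end (if doorbells
	 * are currently enabled for this WQ) tells the hardware about
	 * everything posted under the lock.
	 */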

int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
	union t4_recv_wr *wqe;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
	num_wrs = t4_rq_avail(&qhp->wq);
		spin_unlock_irqrestore(&qhp->lock, flag);
		if (wr->num_sge > T4_MAX_RECV_SGE) {
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.len16 = len16;
		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_rq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)

static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
			*ecode = RDMAP_STAG_NOT_ASSOC;
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
	case T4_ERR_DDP_VERSION:
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;

static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
	struct fw_ri_wr *wqe;
	struct terminate_message *term;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	skb = alloc_skb(sizeof *wqe, gfp);
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	c4iw_ofld_send(&qhp->rhp->rdev, skb);

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp)
	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);

	if (qhp->wq.flushed) {
		spin_unlock(&qhp->lock);
		spin_unlock_irqrestore(&rchp->lock, flag);
	qhp->wq.flushed = 1;

	c4iw_flush_hw_cq(rchp);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(schp);
	flushed = c4iw_flush_sq(qhp);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, flag);
		spin_lock_irqsave(&schp->comp_handler_lock, flag);
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);

static void flush_qp(struct c4iw_qp *qhp)
	struct c4iw_cq *rchp, *schp;

	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	schp = to_c4iw_cq(qhp->ibqp.send_cq);

	t4_set_wq_in_error(&qhp->wq);
	if (qhp->ibqp.uobject) {
		t4_set_cq_in_error(&rchp->cq);
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
			t4_set_cq_in_error(&schp->cq);
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
						   schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
	__flush_qp(qhp, rchp, schp);
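	/*
	 * For user QPs there is nothing to flush from the kernel side: the
	 * CQs are simply marked in-error and their completion handlers are
	 * invoked so the user library can do the flush.  Kernel QPs go
	 * through __flush_qp(), which drains the hardware CQs and inserts
	 * flushed completions for any outstanding SQ/RQ work requests.
	 */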

static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
	wqe->cookie = (unsigned long) &ep->com.wr_wait;

	wqe->u.fini.type = FW_RI_TYPE_FINI;
	ret = c4iw_ofld_send(&rhp->rdev, skb);

	ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
				  qhp->wq.sq.qid, __func__);
	PDBG("%s ret %d\n", __func__, ret);

static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
	PDBG("%s p2p_type = %d\n", __func__, p2p_type);
	memset(&init->u, 0, sizeof init->u);
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
						   sizeof(struct fw_ri_immd),
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);

static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
		V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
		V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
			      FW_RI_QP_RDMA_WRITE_ENABLE |
			      FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
				       FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
					   rhp->rdev.lldi.vr->rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	ret = c4iw_ofld_send(&rhp->rdev, skb);

	ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
				  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
	PDBG("%s ret %d\n", __func__, ret);

/*
 * Called by the library when the qp has user dbs disabled due to
 * a DB_FULL condition.  This function will single-thread all user
 * DB rings to avoid overflowing the hw db-fifo.
 */
static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
	int delay = db_delay_usecs;

	mutex_lock(&qhp->rhp->db_mutex);
		/*
		 * The interrupt threshold is dbfifo_int_thresh << 6. So
		 * make sure we don't cross that and generate an interrupt.
		 */
		if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
		    (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
			writel(QID(qid) | PIDX(inc), qhp->wq.db);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(delay));
		delay = min(delay << 1, 2000);
	mutex_unlock(&qhp->rhp->db_mutex);
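	/*
	 * The wait above is an exponential backoff: starting from
	 * db_delay_usecs, the sleep doubles on every retry and is capped at
	 * 2000us, until the hardware doorbell FIFO has drained enough that
	 * ringing one more doorbell cannot push it past the interrupt
	 * threshold.
	 */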

int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
	struct c4iw_qp_attributes newattr = qhp->attr;
	struct c4iw_ep *ep = NULL;

	PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
	     (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
			newattr.max_ord = attrs->max_ord;
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > c4iw_max_read_depth) {
			newattr.max_ird = attrs->max_ird;
		qhp->attr = newattr;

	if (mask & C4IW_QP_ATTR_SQ_DB) {
		ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
	if (mask & C4IW_QP_ATTR_RQ_DB) {
		ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
	if (qhp->attr.state == attrs->next_state)

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
				c4iw_get_ep(&qhp->ep->com);
			t4_set_wq_in_error(&qhp->wq);
			ret = rdma_fini(rhp, qhp, ep);
		case C4IW_QP_STATE_TERMINATE:
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			t4_set_wq_in_error(&qhp->wq);
			c4iw_get_ep(&qhp->ep->com);
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			t4_set_wq_in_error(&qhp->wq);
				c4iw_get_ep(&qhp->ep->com);
	case C4IW_QP_STATE_CLOSING:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			wake_up(&qhp->wait);
		case C4IW_QP_STATE_ERROR:
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
		set_state(qhp, C4IW_QP_STATE_IDLE);
	case C4IW_QP_STATE_TERMINATE:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);

	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);
	wake_up(&qhp->wait);
	mutex_unlock(&qhp->mutex);

		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
				   GFP_KERNEL);
		c4iw_put_ep(&ep->com);

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
		c4iw_put_ep(&ep->com);
	PDBG("%s exit state %d\n", __func__, qhp->attr.state);

static int enable_qp_db(int id, void *p, void *data)
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);

int c4iw_destroy_qp(struct ib_qp *ib_qp)
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;
	struct c4iw_ucontext *ucontext;

	qhp = to_c4iw_qp(ib_qp);

	attrs.next_state = C4IW_QP_STATE_ERROR;
	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	spin_lock_irq(&rhp->lock);
	remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	BUG_ON(rhp->qpcnt < 0);
	if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
		rhp->rdev.stats.db_state_transitions++;
		rhp->db_state = NORMAL;
		idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
	if (db_coalescing_threshold >= 0)
		if (rhp->qpcnt <= db_coalescing_threshold)
			cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
	spin_unlock_irq(&rhp->lock);
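	/*
	 * Tearing down a QP can undo the doorbell pressure valves: once the
	 * QP count falls back to db_fc_threshold or below, the device leaves
	 * FLOW_CONTROL state and per-QP doorbells are re-enabled; once it
	 * falls to db_coalescing_threshold or below, doorbell coalescing is
	 * turned back on in the LLD.
	 */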
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ?
		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);

static int disable_qp_db(int id, void *p, void *data)
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);

struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	struct c4iw_ucontext *ucontext;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
	if (rqsize > T4_MAX_RQ_SIZE)
		return ERR_PTR(-E2BIG);

	sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
	if (sqsize > T4_MAX_SQ_SIZE)
		return ERR_PTR(-E2BIG);

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
	qhp->wq.sq.flush_cidx = -1;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);

	PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
	     __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	spin_lock_irq(&rhp->lock);
	if (rhp->db_state != NORMAL)
		t4_disable_wq_db(&qhp->wq);
	if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
		rhp->rdev.stats.db_state_transitions++;
		rhp->db_state = FLOW_CONTROL;
		idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
	if (db_coalescing_threshold >= 0)
		if (rhp->qpcnt > db_coalescing_threshold)
			cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
	ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	spin_unlock_irq(&rhp->lock);

		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
		if (t4_sq_onchip(&qhp->wq.sq)) {
			mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
			uresp.flags = C4IW_QPF_ONCHIP;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
			uresp.ma_sync_key = ucontext->key;
			ucontext->key += PAGE_SIZE;
			uresp.ma_sync_key = 0;
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
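		/*
		 * Each key is a page-granular cookie in the per-context mmap
		 * space: it advances by PAGE_SIZE per resource, is returned
		 * to user space in uresp, and is matched again by the
		 * driver's mmap handler (via the c4iw_mm_entry records
		 * inserted below) to map the SQ/RQ memory, the doorbell/GTS
		 * pages and, when present, the MA sync page into the process.
		 */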
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		mm1->key = uresp.sq_key;
		mm1->addr = qhp->wq.sq.phys_addr;
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = virt_to_phys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = qhp->wq.sq.udb;
		mm3->len = PAGE_SIZE;
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = qhp->wq.rq.udb;
		mm4->len = PAGE_SIZE;
		insert_mmap(ucontext, mm4);
			mm5->key = uresp.ma_sync_key;
			mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
				     + A_PCIE_MA_SYNC) & PAGE_MASK;
			mm5->len = PAGE_SIZE;
			insert_mmap(ucontext, mm5);
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
	     __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	return ERR_PTR(ret);

int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(C4IW_QP_ATTR_ENABLE_RDMA_READ |
			 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
			 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	/*
	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
	 * ringing the queue db when we're in DB_FULL mode.
	 */
	attrs.sq_db_inc = attr->sq_psn;
	attrs.rq_db_inc = attr->rq_psn;
	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);

int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof *attr);
	memset(init_attr, 0, sizeof *init_attr);
	attr->qp_state = to_ib_qp_state(qhp->attr.state);