/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"
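/*
 * Tear down a hardware CQ: post a FW_RI_RES_WR with the RESET opcode,
 * wait for the firmware reply, then free the DMA queue memory and
 * return the cqid to the resource pool.
 */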
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;

	wr_len = sizeof *res_wr + sizeof *res;
	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	if (!ret)
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return ret;
}
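/*
 * Allocate a cqid and the CQ DMA memory, then post a FW_RI_RES_WR with
 * the WRITE opcode so the firmware sets up the ingress queue backing
 * this CQ.  Kernel CQs also get a software shadow queue used for flush
 * processing.
 */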
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			V_FW_RI_RES_WR_IQANUS(0) |
			V_FW_RI_RES_WR_IQANUD(1) |
			F_FW_RI_RES_WR_IQANDST |
			V_FW_RI_RES_WR_IQANDSTINDEX(
				rdev->lldi.ciq_ids[cq->vector]));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			F_FW_RI_RES_WR_IQDROPRSS |
			V_FW_RI_RES_WR_IQPCIECH(2) |
			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
			F_FW_RI_RES_WR_IQO |
			V_FW_RI_RES_WR_IQESIZE(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;
	if (user) {
		cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(cq->cqid << rdev->cqshift);
		cq->ugts &= PAGE_MASK;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}
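/*
 * Insert a software-generated flush CQE for one RQ WR into the SW CQ.
 */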
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
				 V_CQE_OPCODE(FW_RI_SEND) |
				 V_CQE_TYPE(0) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->sq.qid));
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}
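/*
 * Flush the RQ: insert a flush CQE for every RQ WR still in use,
 * except for 'count' entries that have already been accounted for.
 * Returns the number of CQEs inserted.
 */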
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	int in_use = wq->rq.in_use - count;

	BUG_ON(in_use < 0);
	PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
	     wq, cq, wq->rq.in_use, count);
	while (in_use--) {
		insert_recv_cqe(wq, cq);
		flushed++;
	}
	return flushed;
}
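/*
 * Insert a software-generated flush CQE for one SQ WR into the SW CQ.
 */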
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
			  struct t4_swsqe *swcqe)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
				 V_CQE_OPCODE(swcqe->opcode) |
				 V_CQE_TYPE(1) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}
static void advance_oldest_read(struct t4_wq *wq);

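/*
 * Flush the SQ: walk the SW SQ from flush_cidx to pidx, inserting a
 * flush CQE for each pending WR and advancing the oldest-read pointer
 * past any flushed read requests.  Returns the number of WRs flushed.
 */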
int c4iw_flush_sq(struct c4iw_qp *qhp)
{
	int flushed = 0;
	struct t4_wq *wq = &qhp->wq;
	struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
	struct t4_cq *cq = &chp->cq;
	int idx;
	struct t4_swsqe *swsqe;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	idx = wq->sq.flush_cidx;
	BUG_ON(idx >= wq->sq.size);
	while (idx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[idx];
		BUG_ON(swsqe->flushed);
		swsqe->flushed = 1;
		insert_sq_cqe(wq, cq, swsqe);
		if (wq->sq.oldest_read == swsqe) {
			BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
			advance_oldest_read(wq);
		}
		flushed++;
		if (++idx == wq->sq.size)
			idx = 0;
	}
	wq->sq.flush_cidx += flushed;
	if (wq->sq.flush_cidx >= wq->sq.size)
		wq->sq.flush_cidx -= wq->sq.size;
	return flushed;
}
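/*
 * Move completed but previously unsignaled SQ completions that are now
 * in order from the SW SQ into the SW CQ, stopping at the first
 * signaled WR that has not yet completed.
 */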
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	int cidx;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	cidx = wq->sq.flush_cidx;
	BUG_ON(cidx > wq->sq.size);

	while (cidx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[cidx];
		if (!swsqe->signaled) {
			if (++cidx == wq->sq.size)
				cidx = 0;
		} else if (swsqe->complete) {

			BUG_ON(swsqe->flushed);

			/*
			 * Insert this completed cqe into the swcq.
			 */
			PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
			     __func__, cidx, cq->sw_pidx);
			swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->flushed = 1;
			if (++cidx == wq->sq.size)
				cidx = 0;
			wq->sq.flush_cidx = cidx;
		} else
			break;
	}
}
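/*
 * Build a SQ-type read-request CQE in local memory from the RQ-type
 * read-response CQE produced by the hardware, taking the wrid, opcode
 * and length from the oldest outstanding read WR.
 */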
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
				struct t4_cqe *read_cqe)
{
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = htonl(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
				 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
				 V_CQE_OPCODE(FW_RI_READ_REQ) |
				 V_CQE_TYPE(1));
	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}
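/*
 * Advance sq.oldest_read to the next outstanding read request in the
 * SW SQ, or set it to NULL if there is none.
 */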
static void advance_oldest_read(struct t4_wq *wq)
{
	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
		rptr = 0;
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
			return;
		if (++rptr == wq->sq.size)
			rptr = 0;
	}
	wq->sq.oldest_read = NULL;
}
/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order and/or completions that complete
 * prior unsignaled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
	struct c4iw_qp *qhp;
	struct t4_swsqe *swsqe;
	int ret;

	PDBG("%s cqid 0x%x\n", __func__, chp->cq.cqid);
	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

	/*
	 * This logic is similar to poll_cq(), but not quite the same
	 * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
	 * also do any translation magic that poll_cq() normally does.
	 */
	while (!ret) {
		qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

		/*
		 * drop CQEs with no associated QP
		 */
		if (qhp == NULL)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

			/* If we have reached here because of an async
			 * event or other error, and have an egress error,
			 * then drop the CQE.
			 */
			if (CQE_TYPE(hw_cqe) == 1)
				goto next_cqe;

			/* drop peer2peer RTR reads.
			 */
			if (CQE_WRID_STAG(hw_cqe) == 1)
				goto next_cqe;

			/*
			 * Eat completions for unsignaled read WRs.
			 */
			if (!qhp->wq.sq.oldest_read->signaled) {
				advance_oldest_read(&qhp->wq);
				goto next_cqe;
			}

			/*
			 * Don't write to the HWCQ, create a new read req CQE
			 * in local memory and move it into the swcq.
			 */
			create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
			hw_cqe = &read_cqe;
			advance_oldest_read(&qhp->wq);
		}

		/* if it's a SQ completion, then do the magic to move all the
		 * unsignaled and now in-order completions into the swcq.
		 */
		if (SQ_TYPE(hw_cqe)) {
			swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
			swsqe->cqe = *hw_cqe;
			swsqe->complete = 1;
			flush_completed_wrs(&qhp->wq, &chp->cq);
		} else {
			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
			*swcqe = *hw_cqe;
			swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
			t4_swcq_produce(&chp->cq);
		}
next_cqe:
		t4_hwcq_consume(&chp->cq);
		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
	}
}
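/*
 * Return 1 if this CQE completes an RQ WR that should be counted,
 * 0 for CQEs (terminate, RDMA write on the RQ side, read response on
 * the SQ side, or a send with an empty RQ) that do not consume an RQ
 * entry.
 */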
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
		return 0;

	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
		return 0;
	return 1;
}
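/*
 * Count the RQ completions pending in the SW CQ for this WQ's QP.
 */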
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	PDBG("%s count zero %d\n", __func__, *count);
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}
/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0            CQE returned ok.
 *     -EAGAIN      CQE skipped, try again.
 *     -EOVERFLOW   CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
	     CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
	     CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
	     CQE_WRID_LOW(hw_cqe));

	/*
	 * skip cqe's not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip hw cqe's if the wq is flushed.
	 */
	if (wq->flushed && !SW_CQE(hw_cqe)) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip TERMINATE cqes...
	 */
	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/* If we have reached here because of an async
		 * event or other error, and have an egress error,
		 * then drop the CQE.
		 */
		if (CQE_TYPE(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/* If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup. So ignore the completion.
		 */
		if (CQE_WRID_STAG(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Eat completions for unsignaled read WRs.
		 */
		if (!wq->sq.oldest_read->signaled) {
			advance_oldest_read(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
		t4_set_wq_in_error(wq);
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If it's not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */
		if (t4_rq_empty(wq)) {
			t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}
		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
			t4_set_wq_in_error(wq);
			hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		PDBG("%s out of order completion going in sw_sq at idx %u\n",
		     __func__, CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		int idx = CQE_WRID_SQ_IDX(hw_cqe);
		BUG_ON(idx >= wq->sq.size);

		/*
		 * Account for any unsignaled completions completed by
		 * this signaled completion.  In this case, cidx points
		 * to the first unsignaled one, and idx points to the
		 * signaled one.  So adjust in_use based on this delta.
		 * If this is not completing any unsignaled wrs, then the
		 * delta will be 0.  Handle wrapping also!
		 */
		if (idx < wq->sq.cidx)
			wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
		else
			wq->sq.in_use -= idx - wq->sq.cidx;
		BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);

		wq->sq.cidx = (uint16_t)idx;
		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		if (c4iw_wr_log)
			c4iw_log_wr_stats(wq, hw_cqe);
		t4_sq_consume(wq);
	} else {
		PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
		BUG_ON(t4_rq_empty(wq));
		if (c4iw_wr_log)
			c4iw_log_wr_stats(wq, hw_cqe);
		t4_rq_consume(wq);
		goto skip_cqe;
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}
/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0			cqe returned
 *	-ENODATA		CQ empty
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe uninitialized_var(cqe), *rd_cqe;
	struct t4_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);
	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
	     CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
	     CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case FW_RI_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;
		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected opcode %d "
			       "in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {
		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
		case T4_ERR_INTERNAL_ERR:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printk(KERN_ERR MOD
			       "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}
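/*
 * ib_poll_cq verb: poll up to num_entries completions while holding
 * the CQ lock, retrying CQEs that poll_cq() asks us to skip (-EAGAIN).
 */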
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_c4iw_cq(ibcq);

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);
	return !err || err == -ENODATA ? npolled : err;
}
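/*
 * ib_destroy_cq verb: remove the idr entry, wait for all references to
 * drop, then release the hardware CQ and its resources.
 */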
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_c4iw_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
				  : NULL;
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
	kfree(chp);
	return 0;
}
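/*
 * ib_create_cq verb: size the hardware ingress queue (status page,
 * full/empty marker, multiple-of-16 rounding, 2x depth, 64-entry
 * minimum), create the hardware CQ, and for user CQs return mmap keys
 * for the queue memory and the GTS doorbell page in the udata response.
 */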
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
			     int vector, struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);

	rhp = to_c4iw_dev(ibdev);

	if (vector >= rhp->rdev.lldi.nciq)
		return ERR_PTR(-EINVAL);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	if (ucontext)
		memsize = roundup(memsize, PAGE_SIZE);
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;
	chp->cq.vector = vector;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err2;

	if (ucontext) {
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm)
			goto err3;
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2)
			goto err4;

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp,
				       sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err5;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.ugts;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
	     __func__, chp->cq.cqid, chp, chp->cq.size,
	     chp->cq.memsize,
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
err5:
	kfree(mm2);
err4:
	kfree(mm);
err3:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	return -ENOSYS;
}
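/*
 * ib_req_notify_cq verb: arm the CQ for the next solicited or any
 * completion.  A missed-events indication is suppressed unless the
 * caller asked for IB_CQ_REPORT_MISSED_EVENTS.
 */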
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct c4iw_cq *chp;
	int ret;
	unsigned long flag;

	chp = to_c4iw_cq(ibcq);
	spin_lock_irqsave(&chp->lock, flag);
	ret = t4_arm_cq(&chp->cq,
			(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		ret = 0;
	return ret;
}