/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"

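/*
 * Post a FW_RI_RES_WR RESET work request on the control queue so the
 * firmware tears down the hardware CQ, wait for the reply, then free the
 * software queue, the DMA-coherent queue memory, and the cqid.
 */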
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                      struct c4iw_dev_ucontext *uctx)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;
        int ret;

        wr_len = sizeof *res_wr + sizeof *res;
        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)&wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_RESET;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);

        c4iw_init_wr_wait(&wr_wait);
        ret = c4iw_ofld_send(rdev, skb);
        if (!ret) {
                ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        }

        kfree(cq->sw_queue);
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
        c4iw_put_cqid(rdev, cq->cqid, uctx);
        return ret;
}

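/*
 * Allocate a cqid and the queue memory (a software shadow queue for
 * kernel CQs plus the DMA-coherent hardware queue), then post a
 * FW_RI_RES_WR WRITE work request so the firmware creates the ingress
 * queue backing this CQ.  The doorbell location is then recorded: user
 * CQs get the BAR2 physical address (ugts) that is later mmapped to
 * userspace, kernel CQs use the shared GTS register on T4 and a
 * per-queue BAR2 mapping on later adapters.
 */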
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                     struct c4iw_dev_ucontext *uctx)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        int user = (uctx != &rdev->uctx);
        struct c4iw_wr_wait wr_wait;
        int ret;
        struct sk_buff *skb;

        cq->cqid = c4iw_get_cqid(rdev, uctx);
        if (!cq->cqid) {
                ret = -ENOMEM;
                goto err1;
        }

        if (!user) {
                cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
                if (!cq->sw_queue) {
                        ret = -ENOMEM;
                        goto err2;
                }
        }
        cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
                                       &cq->dma_addr, GFP_KERNEL);
        if (!cq->queue) {
                ret = -ENOMEM;
                goto err3;
        }
        dma_unmap_addr_set(cq, mapping, cq->dma_addr);
        memset(cq->queue, 0, cq->memsize);

        /* build fw_ri_res_wr */
        wr_len = sizeof *res_wr + sizeof *res;

        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb) {
                ret = -ENOMEM;
                goto err4;
        }
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)&wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_WRITE;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);
        res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
                        FW_RI_RES_WR_IQANUS_V(0) |
                        FW_RI_RES_WR_IQANUD_V(1) |
                        FW_RI_RES_WR_IQANDST_F |
                        FW_RI_RES_WR_IQANDSTINDEX_V(
                                rdev->lldi.ciq_ids[cq->vector]));
        res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
                        FW_RI_RES_WR_IQDROPRSS_F |
                        FW_RI_RES_WR_IQPCIECH_V(2) |
                        FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
                        FW_RI_RES_WR_IQO_F |
                        FW_RI_RES_WR_IQESIZE_V(1));
        res->u.cq.iqsize = cpu_to_be16(cq->size);
        res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

        c4iw_init_wr_wait(&wr_wait);

        ret = c4iw_ofld_send(rdev, skb);
        if (ret)
                goto err4;
        PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        if (ret)
                goto err4;

        cq->gen = 1;
        cq->rdev = rdev;
        if (user) {
                u32 off = (cq->cqid << rdev->cqshift) & PAGE_MASK;

                cq->ugts = (u64)rdev->bar2_pa + off;
        } else if (is_t4(rdev->lldi.adapter_type)) {
                cq->gts = rdev->lldi.gts_reg;
                cq->qid_mask = -1U;
        } else {
                u32 off = ((cq->cqid << rdev->cqshift) & PAGE_MASK) + 12;

                cq->gts = rdev->bar2_kva + off;
                cq->qid_mask = rdev->qpmask;
        }
        return 0;
err4:
        dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
err3:
        kfree(cq->sw_queue);
err2:
        c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
        return ret;
}

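/*
 * Insert a software-generated T4_ERR_SWFLUSH completion for one RQ entry
 * into the software CQ.
 */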
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_cqe cqe;

        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
                                 CQE_OPCODE_V(FW_RI_SEND) |
                                 CQE_TYPE_V(0) |
                                 CQE_SWCQE_V(1) |
                                 CQE_QPID_V(wq->sq.qid));
        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

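/*
 * Flush the RQ by inserting one flush CQE for each entry still in use
 * beyond the @count entries the caller has already accounted for.
 * Returns the number of entries flushed.
 */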
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
        int flushed = 0;
        int in_use = wq->rq.in_use - count;

        BUG_ON(in_use < 0);
        PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
             wq, cq, wq->rq.in_use, count);
        while (in_use--) {
                insert_recv_cqe(wq, cq);
                flushed++;
        }
        return flushed;
}

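/*
 * Insert a software-generated T4_ERR_SWFLUSH completion for one SQ work
 * request into the software CQ.
 */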
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
                          struct t4_swsqe *swcqe)
{
        struct t4_cqe cqe;

        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
                                 CQE_OPCODE_V(swcqe->opcode) |
                                 CQE_TYPE_V(1) |
                                 CQE_SWCQE_V(1) |
                                 CQE_QPID_V(wq->sq.qid));
        CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

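/*
 * Flush every not-yet-flushed SQ work request (from the flush point up
 * to the producer index) into the software CQ, advancing the oldest-read
 * pointer past any read request that gets flushed.  Returns the number
 * of entries flushed.
 */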
int c4iw_flush_sq(struct c4iw_qp *qhp)
{
        int flushed = 0;
        struct t4_wq *wq = &qhp->wq;
        struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
        struct t4_cq *cq = &chp->cq;
        int idx;
        struct t4_swsqe *swsqe;

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        idx = wq->sq.flush_cidx;
        BUG_ON(idx >= wq->sq.size);
        while (idx != wq->sq.pidx) {
                swsqe = &wq->sq.sw_sq[idx];
                BUG_ON(swsqe->flushed);
                swsqe->flushed = 1;
                insert_sq_cqe(wq, cq, swsqe);
                if (wq->sq.oldest_read == swsqe) {
                        BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
                        advance_oldest_read(wq);
                }
                flushed++;
                if (++idx == wq->sq.size)
                        idx = 0;
        }
        wq->sq.flush_cidx += flushed;
        if (wq->sq.flush_cidx >= wq->sq.size)
                wq->sq.flush_cidx -= wq->sq.size;
        return flushed;
}

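/*
 * Walk the software SQ from the flush point and move any completed WRs
 * that are now in order into the software CQ.  Unsignaled entries are
 * skipped; the walk stops at the first signaled entry that has not
 * completed yet.
 */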
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_swsqe *swsqe;
        int cidx;

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        cidx = wq->sq.flush_cidx;
        BUG_ON(cidx > wq->sq.size);

        while (cidx != wq->sq.pidx) {
                swsqe = &wq->sq.sw_sq[cidx];
                if (!swsqe->signaled) {
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                } else if (swsqe->complete) {

                        BUG_ON(swsqe->flushed);

                        /*
                         * Insert this completed cqe into the swcq.
                         */
                        PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
                             __func__, cidx, cq->sw_pidx);
                        swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
                        cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
                        t4_swcq_produce(cq);
                        swsqe->flushed = 1;
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                        wq->sq.flush_cidx = cidx;
                } else
                        break;
        }
}

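/*
 * Build an SQ-type FW_RI_READ_REQ completion in local memory from the
 * hardware read-response CQE and the oldest outstanding read WR, since
 * the hardware CQE does not carry the SQ index, opcode or length of the
 * original work request.
 */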
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
                                struct t4_cqe *read_cqe)
{
        read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
        read_cqe->len = htonl(wq->sq.oldest_read->read_len);
        read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
                                 CQE_SWCQE_V(SW_CQE(hw_cqe)) |
                                 CQE_OPCODE_V(FW_RI_READ_REQ) |
                                 CQE_TYPE_V(1));
        read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

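/*
 * Advance wq->sq.oldest_read to the next outstanding read request in the
 * software SQ, or to NULL if there is none.
 */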
static void advance_oldest_read(struct t4_wq *wq)
{
        u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

        if (rptr == wq->sq.size)
                rptr = 0;
        while (rptr != wq->sq.pidx) {
                wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

                if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
                        return;
                if (++rptr == wq->sq.size)
                        rptr = 0;
        }
        wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order and/or completions that complete
 * prior unsignalled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
        struct t4_cqe *hw_cqe, *swcqe, read_cqe;
        struct c4iw_qp *qhp;
        struct t4_swsqe *swsqe;
        int ret;

        PDBG("%s cqid 0x%x\n", __func__, chp->cq.cqid);
        ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

        /*
         * This logic is similar to poll_cq(), but not quite the same
         * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
         * also do any translation magic that poll_cq() normally does.
         */
        while (!ret) {
                qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

                /*
                 * drop CQEs with no associated QP
                 */
                if (qhp == NULL)
                        goto next_cqe;

                if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
                        goto next_cqe;

                if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

                        /* If we have reached here because of async
                         * event or other error, and have egress error
                         * then drop
                         */
                        if (CQE_TYPE(hw_cqe) == 1)
                                goto next_cqe;

                        /* drop peer2peer RTR reads.
                         */
                        if (CQE_WRID_STAG(hw_cqe) == 1)
                                goto next_cqe;

                        /*
                         * Eat completions for unsignaled read WRs.
                         */
                        if (!qhp->wq.sq.oldest_read->signaled) {
                                advance_oldest_read(&qhp->wq);
                                goto next_cqe;
                        }

                        /*
                         * Don't write to the HWCQ, create a new read req CQE
                         * in local memory and move it into the swcq.
                         */
                        create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
                        hw_cqe = &read_cqe;
                        advance_oldest_read(&qhp->wq);
                }

                /* if its a SQ completion, then do the magic to move all the
                 * unsignaled and now in-order completions into the swcq.
                 */
                if (SQ_TYPE(hw_cqe)) {
                        swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                        swsqe->cqe = *hw_cqe;
                        swsqe->complete = 1;
                        flush_completed_wrs(&qhp->wq, &chp->cq);
                } else {
                        swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
                        *swcqe = *hw_cqe;
                        swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
                        t4_swcq_produce(&chp->cq);
                }
next_cqe:
                t4_hwcq_consume(&chp->cq);
                ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
        }
}

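/*
 * Return 1 if this CQE completes an application work request, 0 for CQEs
 * that must not be reported: TERMINATEs, RQ-type RDMA-write CQEs, SQ-type
 * read responses, and SENDs arriving when the RQ is already empty.
 */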
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
        if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
                return 0;

        if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
                return 0;
        return 1;
}

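/*
 * Count the RQ completions already sitting in the software CQ that
 * belong to this WQ and that will actually complete a work request.
 */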
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
        struct t4_cqe *cqe;
        u32 ptr;

        *count = 0;
        PDBG("%s count zero %d\n", __func__, *count);
        ptr = cq->sw_cidx;
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
                    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
        }
        PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0           CQE returned ok.
 *     -EAGAIN     CQE skipped, try again.
 *     -EOVERFLOW  CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
        int ret = 0;
        struct t4_cqe *hw_cqe, read_cqe;

        *cqe_flushed = 0;
        *credit = 0;
        ret = t4_next_cqe(cq, &hw_cqe);
        if (ret)
                return ret;

        PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
             " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
             __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
             CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
             CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
             CQE_WRID_LOW(hw_cqe));

        /*
         * skip cqe's not affiliated with a QP.
         */
        if (wq == NULL) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip hw cqe's if the wq is flushed.
         */
        if (wq->flushed && !SW_CQE(hw_cqe)) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip TERMINATE cqes...
         */
        if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
         *      2) opcode not reflected from the wr.
         *      3) read_len not reflected from the wr.
         *      4) cq_type is RQ_TYPE not SQ_TYPE.
         */
        if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

                /* If we have reached here because of async
                 * event or other error, and have egress error
                 * then drop
                 */
                if (CQE_TYPE(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /* If this is an unsolicited read response, then the read
                 * was generated by the kernel driver as part of peer-2-peer
                 * connection setup.  So ignore the completion.
                 */
                if (CQE_WRID_STAG(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Eat completions for unsignaled read WRs.
                 */
                if (!wq->sq.oldest_read->signaled) {
                        advance_oldest_read(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Don't write to the HWCQ, so create a new read req CQE
                 * in local memory.
                 */
                create_read_req_cqe(wq, hw_cqe, &read_cqe);
                hw_cqe = &read_cqe;
                advance_oldest_read(wq);
        }

        if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
                *cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
                t4_set_wq_in_error(wq);
        }

        /*
         * RECV completion.
         */
        if (RQ_TYPE(hw_cqe)) {

                /*
                 * HW only validates 4 bits of MSN.  So we must validate that
                 * the MSN in the SEND is the next expected MSN.  If it's not,
                 * then we complete this with T4_ERR_MSN and mark the wq in
                 * error.
                 */

                if (t4_rq_empty(wq)) {
                        t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }
                if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
                        t4_set_wq_in_error(wq);
                        hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
                        goto proc_cqe;
                }
                goto proc_cqe;
        }

        /*
         * If we get here it's a send completion.
         *
         * Handle out of order completion. These get stuffed
         * in the SW SQ. Then the SW SQ is walked to move any
         * now in-order completions into the SW CQ.  This handles
         * 2 cases:
         *      1) reaping unsignaled WRs when the first subsequent
         *         signaled WR is completed.
         *      2) out of order read completions.
         */
        if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
                struct t4_swsqe *swsqe;

                PDBG("%s out of order completion going in sw_sq at idx %u\n",
                     __func__, CQE_WRID_SQ_IDX(hw_cqe));
                swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                swsqe->cqe = *hw_cqe;
                swsqe->complete = 1;
                ret = -EAGAIN;
                goto flush_wq;
        }

proc_cqe:
        *cqe = *hw_cqe;

        /*
         * Reap the associated WR(s) that are freed up with this
         * completion.
         */
        if (SQ_TYPE(hw_cqe)) {
                int idx = CQE_WRID_SQ_IDX(hw_cqe);
                BUG_ON(idx >= wq->sq.size);

                /*
                 * Account for any unsignaled completions completed by
                 * this signaled completion.  In this case, cidx points
                 * to the first unsignaled one, and idx points to the
                 * signaled one.  So adjust in_use based on this delta.
                 * If this is not completing any unsignaled wrs, then the
                 * delta will be 0.  Handle wrapping also!
                 */
                if (idx < wq->sq.cidx)
                        wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
                else
                        wq->sq.in_use -= idx - wq->sq.cidx;
                BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);

                wq->sq.cidx = (uint16_t)idx;
                PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
                *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
                if (c4iw_wr_log)
                        c4iw_log_wr_stats(wq, hw_cqe);
                t4_sq_consume(wq);
        } else {
                PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
                *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
                BUG_ON(t4_rq_empty(wq));
                if (c4iw_wr_log)
                        c4iw_log_wr_stats(wq, hw_cqe);
                t4_rq_consume(wq);
                goto skip_cqe;
        }

flush_wq:
        /*
         * Flush any completed cqes that are now in-order.
         */
        flush_completed_wrs(wq, cq);

skip_cqe:
        if (SW_CQE(hw_cqe)) {
                PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
                     __func__, cq, cq->cqid, cq->sw_cidx);
                t4_swcq_consume(cq);
        } else {
                PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
                     __func__, cq, cq->cqid, cq->cidx);
                t4_hwcq_consume(cq);
        }
        return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *      0                       cqe returned
 *      -ENODATA                EMPTY;
 *      -EAGAIN                 caller must try again
 *      any other -errno        fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
        struct c4iw_qp *qhp = NULL;
        struct t4_cqe uninitialized_var(cqe), *rd_cqe;
        struct t4_wq *wq;
        u32 credit = 0;
        u8 cqe_flushed;
        u64 cookie = 0;
        int ret;

        ret = t4_next_cqe(&chp->cq, &rd_cqe);

        if (ret)
                return ret;

        qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
        if (!qhp)
                wq = NULL;
        else {
                spin_lock(&qhp->lock);
                wq = &(qhp->wq);
        }
        ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
        if (ret)
                goto out;

        wc->wr_id = cookie;
        wc->qp = &qhp->ibqp;
        wc->vendor_err = CQE_STATUS(&cqe);
        wc->wc_flags = 0;

        PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
             "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
             CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
             CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

        if (CQE_TYPE(&cqe) == 0) {
                if (!CQE_STATUS(&cqe))
                        wc->byte_len = CQE_LEN(&cqe);
                else
                        wc->byte_len = 0;
                wc->opcode = IB_WC_RECV;
                if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
                    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
                        wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                }
        } else {
                switch (CQE_OPCODE(&cqe)) {
                case FW_RI_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case FW_RI_READ_REQ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = CQE_LEN(&cqe);
                        break;
                case FW_RI_SEND_WITH_INV:
                case FW_RI_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_SEND;
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        break;
                case FW_RI_SEND:
                case FW_RI_SEND_WITH_SE:
                        wc->opcode = IB_WC_SEND;
                        break;
                case FW_RI_BIND_MW:
                        wc->opcode = IB_WC_BIND_MW;
                        break;

                case FW_RI_LOCAL_INV:
                        wc->opcode = IB_WC_LOCAL_INV;
                        break;
                case FW_RI_FAST_REGISTER:
                        wc->opcode = IB_WC_FAST_REG_MR;
                        break;
                default:
                        printk(KERN_ERR MOD "Unexpected opcode %d "
                               "in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                        goto out;
                }
        }

        if (cqe_flushed)
                wc->status = IB_WC_WR_FLUSH_ERR;
        else {

                switch (CQE_STATUS(&cqe)) {
                case T4_ERR_SUCCESS:
                        wc->status = IB_WC_SUCCESS;
                        break;
                case T4_ERR_STAG:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_PDID:
                        wc->status = IB_WC_LOC_PROT_ERR;
                        break;
                case T4_ERR_QPID:
                case T4_ERR_ACCESS:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_WRAP:
                        wc->status = IB_WC_GENERAL_ERR;
                        break;
                case T4_ERR_BOUND:
                        wc->status = IB_WC_LOC_LEN_ERR;
                        break;
                case T4_ERR_INVALIDATE_SHARED_MR:
                case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                        wc->status = IB_WC_MW_BIND_ERR;
                        break;
                case T4_ERR_CRC:
                case T4_ERR_MARKER:
                case T4_ERR_PDU_LEN_ERR:
                case T4_ERR_OUT_OF_RQE:
                case T4_ERR_DDP_VERSION:
                case T4_ERR_RDMA_VERSION:
                case T4_ERR_DDP_QUEUE_NUM:
                case T4_ERR_MSN:
                case T4_ERR_TBIT:
                case T4_ERR_MO:
                case T4_ERR_MSN_RANGE:
                case T4_ERR_IRD_OVERFLOW:
                case T4_ERR_OPCODE:
                case T4_ERR_INTERNAL_ERR:
                        wc->status = IB_WC_FATAL_ERR;
                        break;
                case T4_ERR_SWFLUSH:
                        wc->status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        printk(KERN_ERR MOD
                               "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
                               CQE_STATUS(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                }
        }
out:
        if (wq)
                spin_unlock(&qhp->lock);
        return ret;
}

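/*
 * Poll up to num_entries completions into the caller's work completion
 * array, retrying whenever poll_cq() asks for the CQE to be skipped.
 */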
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct c4iw_cq *chp;
        unsigned long flags;
        int npolled;
        int err = 0;

        chp = to_c4iw_cq(ibcq);

        spin_lock_irqsave(&chp->lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
                do {
                        err = c4iw_poll_cq_one(chp, wc + npolled);
                } while (err == -EAGAIN);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&chp->lock, flags);
        return !err || err == -ENODATA ? npolled : err;
}

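/*
 * Remove the CQ from the cqid table, wait for the reference count to
 * drop, then have the firmware reset the queue and release its resources.
 */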
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
        struct c4iw_cq *chp;
        struct c4iw_ucontext *ucontext;

        PDBG("%s ib_cq %p\n", __func__, ib_cq);
        chp = to_c4iw_cq(ib_cq);

        remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
        atomic_dec(&chp->refcnt);
        wait_event(chp->wait, !atomic_read(&chp->refcnt));

        ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
                                  : NULL;
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
        kfree(chp);
        return 0;
}

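/*
 * Allocate and initialize a CQ.  The requested depth is padded for the
 * status page and for full-vs-empty detection, rounded up to a multiple
 * of 16, and doubled (up to the adapter limit) for the hardware queue.
 * For user CQs the queue memory and the GTS/BAR2 doorbell page are
 * exported to userspace via mmap keys in the create response.
 */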
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
                             int vector, struct ib_ucontext *ib_context,
                             struct ib_udata *udata)
{
        struct c4iw_dev *rhp;
        struct c4iw_cq *chp;
        struct c4iw_create_cq_resp uresp;
        struct c4iw_ucontext *ucontext = NULL;
        int ret;
        size_t memsize, hwentries;
        struct c4iw_mm_entry *mm, *mm2;

        PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);

        rhp = to_c4iw_dev(ibdev);

        if (vector >= rhp->rdev.lldi.nciq)
                return ERR_PTR(-EINVAL);

        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return ERR_PTR(-ENOMEM);

        if (ib_context)
                ucontext = to_c4iw_ucontext(ib_context);

        /* account for the status page. */
        entries++;

        /* IQ needs one extra entry to differentiate full vs empty. */
        entries++;

        /*
         * entries must be multiple of 16 for HW.
         */
        entries = roundup(entries, 16);

        /*
         * Make actual HW queue 2x to avoid cidx_inc overflows.
         */
        hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

        /*
         * Make HW queue at least 64 entries so GTS updates aren't too
         * frequent.
         */
        if (hwentries < 64)
                hwentries = 64;

        memsize = hwentries * sizeof *chp->cq.queue;

        /*
         * memsize must be a multiple of the page size if it's a user cq.
         */
        if (ucontext)
                memsize = roundup(memsize, PAGE_SIZE);
        chp->cq.size = hwentries;
        chp->cq.memsize = memsize;
        chp->cq.vector = vector;

        ret = create_cq(&rhp->rdev, &chp->cq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
        if (ret)
                goto err1;

        chp->rhp = rhp;
        chp->cq.size--;                         /* status page */
        chp->ibcq.cqe = entries - 2;
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
        if (ret)
                goto err2;

        if (ucontext) {
                mm = kmalloc(sizeof *mm, GFP_KERNEL);
                if (!mm)
                        goto err3;
                mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                if (!mm2)
                        goto err4;

                uresp.qid_mask = rhp->rdev.cqmask;
                uresp.cqid = chp->cq.cqid;
                uresp.size = chp->cq.size;
                uresp.memsize = chp->cq.memsize;
                spin_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.gts_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
                ret = ib_copy_to_udata(udata, &uresp,
                                       sizeof(uresp) - sizeof(uresp.reserved));
                if (ret)
                        goto err5;

                mm->key = uresp.key;
                mm->addr = virt_to_phys(chp->cq.queue);
                mm->len = chp->cq.memsize;
                insert_mmap(ucontext, mm);

                mm2->key = uresp.gts_key;
                mm2->addr = chp->cq.ugts;
                mm2->len = PAGE_SIZE;
                insert_mmap(ucontext, mm2);
        }
        PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
             __func__, chp->cq.cqid, chp, chp->cq.size,
             chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
        return &chp->ibcq;
err5:
        kfree(mm2);
err4:
        kfree(mm);
err3:
        remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
        kfree(chp);
        return ERR_PTR(ret);
}

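/* Resizing CQs is not supported. */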
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
        return -ENOSYS;
}

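/*
 * Arm the CQ for the next notification, solicited-only if requested.
 * A non-zero return from t4_arm_cq() is passed back to the caller only
 * when IB_CQ_REPORT_MISSED_EVENTS was requested.
 */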
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct c4iw_cq *chp;
        int ret;
        unsigned long flag;

        chp = to_c4iw_cq(ibcq);
        spin_lock_irqsave(&chp->lock, flag);
        ret = t4_arm_cq(&chp->cq,
                        (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
        spin_unlock_irqrestore(&chp->lock, flag);
        if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
                ret = 0;
        return ret;
}