RDMA/cxgb4: Add driver for Chelsio T4 RNIC
[linux-2.6-block.git] drivers/infiniband/hw/cxgb4/cq.c
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"

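/*
 * destroy_cq - free a hardware CQ.
 *
 * Posts a FW_RI_RES_WR with op RESET on the control txq and waits for the
 * firmware reply (marking the device fatally errored on timeout), then frees
 * the software queue and the DMA-coherent queue memory and releases the CQID
 * back to the resource pool.
 */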
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;

	wr_len = sizeof *res_wr + sizeof *res;
	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (u64)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	if (!ret) {
		wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
		if (!wr_wait.done) {
			printk(KERN_ERR MOD "Device %s not responding!\n",
			       pci_name(rdev->lldi.pdev));
			rdev->flags = T4_FATAL_ERROR;
			ret = -EIO;
		} else
			ret = wr_wait.ret;
	}

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  pci_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return ret;
}

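/*
 * create_cq - allocate and initialize a hardware CQ.
 *
 * Allocates a CQID and the DMA-coherent queue memory (plus a kernel software
 * queue for non-user CQs), then posts a FW_RI_RES_WR with op WRITE describing
 * the ingress queue and waits for the firmware reply.  For user CQs the GTS
 * doorbell bus address is recorded so it can be mmapped later.
 */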
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (u64)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			V_FW_RI_RES_WR_IQANUS(0) |
			V_FW_RI_RES_WR_IQANUD(1) |
			F_FW_RI_RES_WR_IQANDST |
			V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			F_FW_RI_RES_WR_IQDROPRSS |
			V_FW_RI_RES_WR_IQPCIECH(2) |
			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
			F_FW_RI_RES_WR_IQO |
			V_FW_RI_RES_WR_IQESIZE(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
	if (!wr_wait.done) {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(rdev->lldi.pdev));
		rdev->flags = T4_FATAL_ERROR;
		ret = -EIO;
	} else
		ret = wr_wait.ret;
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;
	if (user) {
		cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			   (cq->cqid << rdev->cqshift);
		cq->ugts &= PAGE_MASK;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  pci_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}

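/*
 * Insert a software-generated T4_ERR_SWFLUSH completion for one outstanding
 * RQ WR into the software CQ.
 */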
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
				 V_CQE_OPCODE(FW_RI_SEND) |
				 V_CQE_TYPE(0) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->rq.qid));
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

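/*
 * Flush the RQ WRs that do not already have a completion sitting in the
 * software CQ (the caller passes that number in @count).  Returns the
 * number of flush CQEs inserted.
 */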
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	int in_use = wq->rq.in_use - count;

	BUG_ON(in_use < 0);
	PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
	     wq, cq, wq->rq.in_use, count);
	while (in_use--) {
		insert_recv_cqe(wq, cq);
		flushed++;
	}
	return flushed;
}

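/*
 * Insert a software-generated T4_ERR_SWFLUSH completion for one outstanding
 * SQ WR into the software CQ.
 */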
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
			  struct t4_swsqe *swcqe)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
				 V_CQE_OPCODE(swcqe->opcode) |
				 V_CQE_TYPE(1) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

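/*
 * Starting @count entries past the SQ consumer index, insert a flush CQE
 * for each remaining in-use SQ WR.  Returns the number flushed.
 */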
int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
	int in_use = wq->sq.in_use - count;

	BUG_ON(in_use < 0);
	while (in_use--) {
		swsqe->signaled = 0;
		insert_sq_cqe(wq, cq, swsqe);
		swsqe++;
		if (swsqe == (wq->sq.sw_sq + wq->sq.size))
			swsqe = wq->sq.sw_sq;
		flushed++;
	}
	return flushed;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 */
void c4iw_flush_hw_cq(struct t4_cq *cq)
{
	struct t4_cqe *cqe = NULL, *swcqe;
	int ret;

	PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
	ret = t4_next_hw_cqe(cq, &cqe);
	while (!ret) {
		PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",
		     __func__, cq->cidx, cq->sw_pidx);
		swcqe = &cq->sw_queue[cq->sw_pidx];
		*swcqe = *cqe;
		swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
		t4_swcq_produce(cq);
		t4_hwcq_consume(cq);
		ret = t4_next_hw_cqe(cq, &cqe);
	}
}

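/*
 * Return 1 if this CQE completes a user WR, 0 for CQEs that carry no WR
 * completion: terminate messages, the target side of RDMA writes, the
 * responder side of read responses, or SEND-type RQ CQEs when the RQ is
 * empty.
 */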
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
		return 0;

	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
		return 0;
	return 1;
}

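/* Count the CQEs in the software CQ that belong to this WQ's SQ. */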
void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
				      wq->sq.oldest_read)) &&
		    (CQE_QPID(cqe) == wq->sq.qid))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

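/* Count the CQEs in the software CQ that complete a WR on this WQ's RQ. */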
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	PDBG("%s count zero %d\n", __func__, *count);
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

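/*
 * Walk the software SQ from the consumer index; when a completed CQE is
 * found behind only unsignaled WRs, reap those unsignaled WRs and move the
 * completion into the software CQ so it can be polled in order.
 */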
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	u16 ptr = wq->sq.cidx;
	int count = wq->sq.in_use;
	int unsignaled = 0;

	swsqe = &wq->sq.sw_sq[ptr];
	while (count--)
		if (!swsqe->signaled) {
			if (++ptr == wq->sq.size)
				ptr = 0;
			swsqe = &wq->sq.sw_sq[ptr];
			unsignaled++;
		} else if (swsqe->complete) {

			/*
			 * Insert this completed cqe into the swcq.
			 */
			PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
			     __func__, ptr, cq->sw_pidx);
			swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->signaled = 0;
			wq->sq.in_use -= unsignaled;
			break;
		} else
			break;
}

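/*
 * Build a local SQ-type READ REQ completion for the oldest outstanding
 * read, using the QPID from the hardware READ RESP CQE.
 */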
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
				struct t4_cqe *read_cqe)
{
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
				 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
				 V_CQE_OPCODE(FW_RI_READ_REQ) |
				 V_CQE_TYPE(1));
}

/*
 * Advance wq->sq.oldest_read to the next read wr in the SWSQ,
 * or set it to NULL if there is none.
 */
static void advance_oldest_read(struct t4_wq *wq)
{

	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
		rptr = 0;
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
			return;
		if (++rptr == wq->sq.size)
			rptr = 0;
	}
	wq->sq.oldest_read = NULL;
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0		CQE returned ok.
 *     -EAGAIN		CQE skipped, try again.
 *     -EOVERFLOW	CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
	     CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
	     CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
	     CQE_WRID_LOW(hw_cqe));

	/*
	 * skip cqe's not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/*
		 * If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup.  So ignore the completion.
		 */
		if (!wq->sq.oldest_read) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = t4_wq_in_error(wq);
		t4_set_wq_in_error(wq);
		goto proc_cqe;
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If it's not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */

		if (t4_rq_empty(wq)) {
			t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}
		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
			t4_set_wq_in_error(wq);
			hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		PDBG("%s out of order completion going in sw_sq at idx %u\n",
		     __func__, CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		t4_sq_consume(wq);
	} else {
		PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
		BUG_ON(t4_rq_empty(wq));
		t4_rq_consume(wq);
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0			cqe returned
 *	-ENODATA		CQ empty
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe cqe = {0, 0}, *rd_cqe;
	struct t4_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
	     CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
	     CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case FW_RI_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;

		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected opcode %d "
			       "in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printk(KERN_ERR MOD
			       "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}

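/*
 * c4iw_poll_cq - ib_poll_cq entry point for iw_cxgb4.
 *
 * Polls up to @num_entries completions into @wc, retrying internally when a
 * CQE is skipped (-EAGAIN).  Returns the number of entries polled, or a
 * negative errno on a fatal error.  A consumer drains the CQ through the
 * normal verbs API; a minimal sketch (the caller's cq, wc array and
 * handle_completion() are hypothetical):
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_completion(&wc[i]);
 */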
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_c4iw_cq(ibcq);

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);
	return !err || err == -ENODATA ? npolled : err;
}

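/*
 * Tear down a CQ: remove it from the cqid table, wait for all references
 * to drop, then reset the hardware queue and free its resources.
 */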
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_c4iw_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
				  : NULL;
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
	kfree(chp);
	return 0;
}

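/*
 * c4iw_create_cq - ib_create_cq entry point.
 *
 * Rounds the requested depth up for the hardware (plus the status page),
 * creates the HW CQ, and, for user contexts, hands back mmap keys for the
 * queue memory and the GTS doorbell page via the udata response.
 */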
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
			     int vector, struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret;
	size_t memsize;
	struct c4iw_mm_entry *mm, *mm2;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);

	rhp = to_c4iw_dev(ibdev);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);
	memsize = entries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	if (ucontext)
		memsize = roundup(memsize, PAGE_SIZE);
	chp->cq.size = entries;
	chp->cq.memsize = memsize;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = chp->cq.size;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err2;

	if (ucontext) {
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err5;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.ugts;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
	     __func__, chp->cq.cqid, chp, chp->cq.size,
	     chp->cq.memsize,
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
err5:
	kfree(mm2);
err4:
	kfree(mm);
err3:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}

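/* CQ resize is not supported by this driver. */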
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	return -ENOSYS;
}

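/*
 * Arm the CQ for the next notification event (solicited-only if requested).
 * Unless IB_CQ_REPORT_MISSED_EVENTS was asked for, any non-zero value from
 * t4_arm_cq is suppressed and 0 is returned.
 */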
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct c4iw_cq *chp;
	int ret;
	unsigned long flag;

	chp = to_c4iw_cq(ibcq);
	spin_lock_irqsave(&chp->lock, flag);
	ret = t4_arm_cq(&chp->cq,
			(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		ret = 0;
	return ret;
}