drivers/infiniband/hw/cxgb4/cq.c
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"

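/*
 * Free a CQ allocated by create_cq(): issue an FW_RI_RES_WR RESET command
 * to the firmware, wait for its completion, then release the software
 * queue, the DMA queue memory and the CQID.
 */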
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                      struct c4iw_dev_ucontext *uctx)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;
        int ret;

        wr_len = sizeof *res_wr + sizeof *res;
        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP(FW_RI_RES_WR) |
                        V_FW_RI_RES_WR_NRES(1) |
                        FW_WR_COMPL(1));
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (unsigned long) &wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_RESET;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);

        c4iw_init_wr_wait(&wr_wait);
        ret = c4iw_ofld_send(rdev, skb);
        if (!ret)
                ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

        kfree(cq->sw_queue);
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
        c4iw_put_cqid(rdev, cq->cqid, uctx);
        return ret;
}

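/*
 * Allocate a CQ: reserve a CQID, allocate the DMA queue memory (and a
 * software shadow queue for kernel CQs), then issue an FW_RI_RES_WR
 * WRITE command so the hardware creates the ingress queue backing the CQ.
 */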
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                     struct c4iw_dev_ucontext *uctx)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        int user = (uctx != &rdev->uctx);
        struct c4iw_wr_wait wr_wait;
        int ret;
        struct sk_buff *skb;

        cq->cqid = c4iw_get_cqid(rdev, uctx);
        if (!cq->cqid) {
                ret = -ENOMEM;
                goto err1;
        }

        if (!user) {
                cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
                if (!cq->sw_queue) {
                        ret = -ENOMEM;
                        goto err2;
                }
        }
        cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
                                       &cq->dma_addr, GFP_KERNEL);
        if (!cq->queue) {
                ret = -ENOMEM;
                goto err3;
        }
        dma_unmap_addr_set(cq, mapping, cq->dma_addr);
        memset(cq->queue, 0, cq->memsize);

        /* build fw_ri_res_wr */
        wr_len = sizeof *res_wr + sizeof *res;

        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb) {
                ret = -ENOMEM;
                goto err4;
        }
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP(FW_RI_RES_WR) |
                        V_FW_RI_RES_WR_NRES(1) |
                        FW_WR_COMPL(1));
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (unsigned long) &wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_WRITE;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);
        res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
                        V_FW_RI_RES_WR_IQANUS(0) |
                        V_FW_RI_RES_WR_IQANUD(1) |
                        F_FW_RI_RES_WR_IQANDST |
                        V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
        res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
                        F_FW_RI_RES_WR_IQDROPRSS |
                        V_FW_RI_RES_WR_IQPCIECH(2) |
                        V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
                        F_FW_RI_RES_WR_IQO |
                        V_FW_RI_RES_WR_IQESIZE(1));
        res->u.cq.iqsize = cpu_to_be16(cq->size);
        res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

        c4iw_init_wr_wait(&wr_wait);

        ret = c4iw_ofld_send(rdev, skb);
        if (ret)
                goto err4;
        PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        if (ret)
                goto err4;

        cq->gen = 1;
        cq->gts = rdev->lldi.gts_reg;
        cq->rdev = rdev;
        if (user) {
                cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
                                        (cq->cqid << rdev->cqshift);
                cq->ugts &= PAGE_MASK;
        }
        return 0;
err4:
        dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
err3:
        kfree(cq->sw_queue);
err2:
        c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
        return ret;
}

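/*
 * Add a software-generated flush CQE for one outstanding RQ WR to the
 * software CQ, so the consumer sees it completed with T4_ERR_SWFLUSH.
 */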
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_cqe cqe;

        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
                                 V_CQE_OPCODE(FW_RI_SEND) |
                                 V_CQE_TYPE(0) |
                                 V_CQE_SWCQE(1) |
                                 V_CQE_QPID(wq->sq.qid));
        cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

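/*
 * Insert flush CQEs for the RQ WRs still in use, minus the 'count'
 * entries the caller has already accounted for.  Returns the number of
 * WRs flushed.
 */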
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
        int flushed = 0;
        int in_use = wq->rq.in_use - count;

        BUG_ON(in_use < 0);
        PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
             wq, cq, wq->rq.in_use, count);
        while (in_use--) {
                insert_recv_cqe(wq, cq);
                flushed++;
        }
        return flushed;
}

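/*
 * Add a software-generated flush CQE for the given SQ WR to the
 * software CQ, preserving the original opcode and SQ index.
 */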
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
                          struct t4_swsqe *swcqe)
{
        struct t4_cqe cqe;

        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
                                 V_CQE_OPCODE(swcqe->opcode) |
                                 V_CQE_TYPE(1) |
                                 V_CQE_SWCQE(1) |
                                 V_CQE_QPID(wq->sq.qid));
        CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
        cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

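/*
 * Walk the SQ from the flush point to the producer index.  If the QP is
 * neither CLOSING nor IDLE (i.e. in error), insert flush CQEs for the
 * unflushed WRs; otherwise simply consume them.  Returns the number of
 * WRs flushed.
 */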
int c4iw_flush_sq(struct c4iw_qp *qhp)
{
        int flushed = 0;
        struct t4_wq *wq = &qhp->wq;
        struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
        struct t4_cq *cq = &chp->cq;
        int idx;
        struct t4_swsqe *swsqe;
        int error = (qhp->attr.state != C4IW_QP_STATE_CLOSING &&
                        qhp->attr.state != C4IW_QP_STATE_IDLE);

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        idx = wq->sq.flush_cidx;
        BUG_ON(idx >= wq->sq.size);
        while (idx != wq->sq.pidx) {
                if (error) {
                        swsqe = &wq->sq.sw_sq[idx];
                        BUG_ON(swsqe->flushed);
                        swsqe->flushed = 1;
                        insert_sq_cqe(wq, cq, swsqe);
                        if (wq->sq.oldest_read == swsqe) {
                                BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
                                advance_oldest_read(wq);
                        }
                        flushed++;
                } else {
                        t4_sq_consume(wq);
                }
                if (++idx == wq->sq.size)
                        idx = 0;
        }
        wq->sq.flush_cidx += flushed;
        if (wq->sq.flush_cidx >= wq->sq.size)
                wq->sq.flush_cidx -= wq->sq.size;
        return flushed;
}

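/*
 * Starting at the flush point, walk the SQ and move CQEs of completed
 * signaled WRs into the software CQ, skipping over unsignaled WRs, and
 * stop at the first signaled WR that has not completed yet.
 */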
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_swsqe *swsqe;
        int cidx;

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        cidx = wq->sq.flush_cidx;
        BUG_ON(cidx > wq->sq.size);

        while (cidx != wq->sq.pidx) {
                swsqe = &wq->sq.sw_sq[cidx];
                if (!swsqe->signaled) {
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                } else if (swsqe->complete) {

                        BUG_ON(swsqe->flushed);

                        /*
                         * Insert this completed cqe into the swcq.
                         */
                        PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
                                        __func__, cidx, cq->sw_pidx);
                        swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
                        cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
                        t4_swcq_produce(cq);
                        swsqe->flushed = 1;
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                        wq->sq.flush_cidx = cidx;
                } else
                        break;
        }
}

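/*
 * Build a synthetic SQ-type read-request CQE from a hardware read
 * response CQE, taking the SQ index and length from the oldest
 * outstanding read WR.
 */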
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
                struct t4_cqe *read_cqe)
{
        read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
        read_cqe->len = htonl(wq->sq.oldest_read->read_len);
        read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
                        V_CQE_SWCQE(SW_CQE(hw_cqe)) |
                        V_CQE_OPCODE(FW_RI_READ_REQ) |
                        V_CQE_TYPE(1));
        read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

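/*
 * Advance wq->sq.oldest_read to the next outstanding read request in
 * the SQ, or set it to NULL if no read WRs remain pending.
 */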
static void advance_oldest_read(struct t4_wq *wq)
{
        u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

        if (rptr == wq->sq.size)
                rptr = 0;
        while (rptr != wq->sq.pidx) {
                wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

                if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
                        return;
                if (++rptr == wq->sq.size)
                        rptr = 0;
        }
        wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order completions and with completions that
 * complete prior unsignaled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
        struct t4_cqe *hw_cqe, *swcqe, read_cqe;
        struct c4iw_qp *qhp;
        struct t4_swsqe *swsqe;
        int ret;

        PDBG("%s  cqid 0x%x\n", __func__, chp->cq.cqid);
        ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

        /*
         * This logic is similar to poll_cq(), but not quite the same
         * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
         * also do any translation magic that poll_cq() normally does.
         */
        while (!ret) {
                qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

                /*
                 * drop CQEs with no associated QP
                 */
                if (qhp == NULL)
                        goto next_cqe;

                if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
                        goto next_cqe;

                if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

                        /* If we have reached here because of async
                         * event or other error, and have egress error
                         * then drop
                         */
                        if (CQE_TYPE(hw_cqe) == 1)
                                goto next_cqe;

                        /* drop peer2peer RTR reads.
                         */
                        if (CQE_WRID_STAG(hw_cqe) == 1)
                                goto next_cqe;

                        /*
                         * Eat completions for unsignaled read WRs.
                         */
                        if (!qhp->wq.sq.oldest_read->signaled) {
                                advance_oldest_read(&qhp->wq);
                                goto next_cqe;
                        }

                        /*
                         * Don't write to the HWCQ, create a new read req CQE
                         * in local memory and move it into the swcq.
                         */
                        create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
                        hw_cqe = &read_cqe;
                        advance_oldest_read(&qhp->wq);
                }

                /* if it's an SQ completion, then do the magic to move all the
                 * unsignaled and now in-order completions into the swcq.
                 */
                if (SQ_TYPE(hw_cqe)) {
                        swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                        swsqe->cqe = *hw_cqe;
                        swsqe->complete = 1;
                        flush_completed_wrs(&qhp->wq, &chp->cq);
                } else {
                        swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
                        *swcqe = *hw_cqe;
                        swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
                        t4_swcq_produce(&chp->cq);
                }
next_cqe:
                t4_hwcq_consume(&chp->cq);
                ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
        }
}

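/*
 * Return 1 if this CQE completes an outstanding WR on the given WQ,
 * 0 if it should be ignored when counting (TERMINATE CQEs, RQ-type
 * RDMA WRITE CQEs, SQ-type READ RESPONSE CQEs, and SEND CQEs that
 * arrive when the RQ is already empty).
 */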
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
        if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
                return 0;

        if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
                return 0;
        return 1;
}

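/*
 * Count the RQ completions in the software CQ that belong to the given
 * WQ and still complete an outstanding WR.
 */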
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
        struct t4_cqe *cqe;
        u32 ptr;

        *count = 0;
        PDBG("%s count zero %d\n", __func__, *count);
        ptr = cq->sw_cidx;
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
                    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
        }
        PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *    0             CQE returned ok.
 *    -EAGAIN       CQE skipped, try again.
 *    -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
        int ret = 0;
        struct t4_cqe *hw_cqe, read_cqe;

        *cqe_flushed = 0;
        *credit = 0;
        ret = t4_next_cqe(cq, &hw_cqe);
        if (ret)
                return ret;

        PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
             " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
             __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
             CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
             CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
             CQE_WRID_LOW(hw_cqe));

        /*
         * skip CQEs not affiliated with a QP.
         */
        if (wq == NULL) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip HW CQEs if the wq is flushed.
         */
        if (wq->flushed && !SW_CQE(hw_cqe)) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip TERMINATE cqes...
         */
        if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
         *      2) opcode not reflected from the wr.
         *      3) read_len not reflected from the wr.
         *      4) cq_type is RQ_TYPE not SQ_TYPE.
         */
        if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

                /* If we have reached here because of async
                 * event or other error, and have egress error
                 * then drop
                 */
                if (CQE_TYPE(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /* If this is an unsolicited read response, then the read
                 * was generated by the kernel driver as part of peer-2-peer
                 * connection setup.  So ignore the completion.
                 */
                if (CQE_WRID_STAG(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Eat completions for unsignaled read WRs.
                 */
                if (!wq->sq.oldest_read->signaled) {
                        advance_oldest_read(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Don't write to the HWCQ, so create a new read req CQE
                 * in local memory.
                 */
                create_read_req_cqe(wq, hw_cqe, &read_cqe);
                hw_cqe = &read_cqe;
                advance_oldest_read(wq);
        }

        if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
                *cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
                t4_set_wq_in_error(wq);
        }

        /*
         * RECV completion.
         */
        if (RQ_TYPE(hw_cqe)) {

                /*
                 * HW only validates 4 bits of MSN.  So we must validate that
                 * the MSN in the SEND is the next expected MSN.  If it's not,
                 * then we complete this with T4_ERR_MSN and mark the wq in
                 * error.
                 */

                if (t4_rq_empty(wq)) {
                        t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }
                if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
                        t4_set_wq_in_error(wq);
                        hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
                        goto proc_cqe;
                }
                goto proc_cqe;
        }

        /*
         * If we get here it's a send completion.
         *
         * Handle out of order completion. These get stuffed
         * in the SW SQ. Then the SW SQ is walked to move any
         * now in-order completions into the SW CQ.  This handles
         * 2 cases:
         *      1) reaping unsignaled WRs when the first subsequent
         *         signaled WR is completed.
         *      2) out of order read completions.
         */
        if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
                struct t4_swsqe *swsqe;

                PDBG("%s out of order completion going in sw_sq at idx %u\n",
                     __func__, CQE_WRID_SQ_IDX(hw_cqe));
                swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                swsqe->cqe = *hw_cqe;
                swsqe->complete = 1;
                ret = -EAGAIN;
                goto flush_wq;
        }

proc_cqe:
        *cqe = *hw_cqe;

        /*
         * Reap the associated WR(s) that are freed up with this
         * completion.
         */
        if (SQ_TYPE(hw_cqe)) {
                int idx = CQE_WRID_SQ_IDX(hw_cqe);
                BUG_ON(idx >= wq->sq.size);

                /*
                 * Account for any unsignaled completions completed by
                 * this signaled completion.  In this case, cidx points
                 * to the first unsignaled one, and idx points to the
                 * signaled one.  So adjust in_use based on this delta.
                 * If this is not completing any unsignaled WRs, then the
                 * delta will be 0. Handle wrapping also!
                 */
                if (idx < wq->sq.cidx)
                        wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
                else
                        wq->sq.in_use -= idx - wq->sq.cidx;
                BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);

                wq->sq.cidx = (uint16_t)idx;
                PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
                *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
                t4_sq_consume(wq);
        } else {
                PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
                *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
                BUG_ON(t4_rq_empty(wq));
                t4_rq_consume(wq);
                goto skip_cqe;
        }

flush_wq:
        /*
         * Flush any completed cqes that are now in-order.
         */
        flush_completed_wrs(wq, cq);

skip_cqe:
        if (SW_CQE(hw_cqe)) {
                PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
                     __func__, cq, cq->cqid, cq->sw_cidx);
                t4_swcq_consume(cq);
        } else {
                PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
                     __func__, cq, cq->cqid, cq->cidx);
                t4_hwcq_consume(cq);
        }
        return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *      0                       cqe returned
 *      -ENODATA                CQ empty
 *      -EAGAIN                 caller must try again
 *      any other -errno        fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
        struct c4iw_qp *qhp = NULL;
        struct t4_cqe cqe = {0, 0}, *rd_cqe;
        struct t4_wq *wq;
        u32 credit = 0;
        u8 cqe_flushed;
        u64 cookie = 0;
        int ret;

        ret = t4_next_cqe(&chp->cq, &rd_cqe);

        if (ret)
                return ret;

        qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
        if (!qhp)
                wq = NULL;
        else {
                spin_lock(&qhp->lock);
                wq = &(qhp->wq);
        }
        ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
        if (ret)
                goto out;

        wc->wr_id = cookie;
        wc->qp = &qhp->ibqp;
        wc->vendor_err = CQE_STATUS(&cqe);
        wc->wc_flags = 0;

        PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
             "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
             CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
             CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

        if (CQE_TYPE(&cqe) == 0) {
                if (!CQE_STATUS(&cqe))
                        wc->byte_len = CQE_LEN(&cqe);
                else
                        wc->byte_len = 0;
                wc->opcode = IB_WC_RECV;
                if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
                    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
                        wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                }
        } else {
                switch (CQE_OPCODE(&cqe)) {
                case FW_RI_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case FW_RI_READ_REQ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = CQE_LEN(&cqe);
                        break;
                case FW_RI_SEND_WITH_INV:
                case FW_RI_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_SEND;
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        break;
                case FW_RI_SEND:
                case FW_RI_SEND_WITH_SE:
                        wc->opcode = IB_WC_SEND;
                        break;
                case FW_RI_BIND_MW:
                        wc->opcode = IB_WC_BIND_MW;
                        break;
                case FW_RI_LOCAL_INV:
                        wc->opcode = IB_WC_LOCAL_INV;
                        break;
                case FW_RI_FAST_REGISTER:
                        wc->opcode = IB_WC_FAST_REG_MR;
                        break;
                default:
                        printk(KERN_ERR MOD "Unexpected opcode %d "
                               "in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                        goto out;
                }
        }

        if (cqe_flushed)
                wc->status = IB_WC_WR_FLUSH_ERR;
        else {

                switch (CQE_STATUS(&cqe)) {
                case T4_ERR_SUCCESS:
                        wc->status = IB_WC_SUCCESS;
                        break;
                case T4_ERR_STAG:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_PDID:
                        wc->status = IB_WC_LOC_PROT_ERR;
                        break;
                case T4_ERR_QPID:
                case T4_ERR_ACCESS:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_WRAP:
                        wc->status = IB_WC_GENERAL_ERR;
                        break;
                case T4_ERR_BOUND:
                        wc->status = IB_WC_LOC_LEN_ERR;
                        break;
                case T4_ERR_INVALIDATE_SHARED_MR:
                case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                        wc->status = IB_WC_MW_BIND_ERR;
                        break;
                case T4_ERR_CRC:
                case T4_ERR_MARKER:
                case T4_ERR_PDU_LEN_ERR:
                case T4_ERR_OUT_OF_RQE:
                case T4_ERR_DDP_VERSION:
                case T4_ERR_RDMA_VERSION:
                case T4_ERR_DDP_QUEUE_NUM:
                case T4_ERR_MSN:
                case T4_ERR_TBIT:
                case T4_ERR_MO:
                case T4_ERR_MSN_RANGE:
                case T4_ERR_IRD_OVERFLOW:
                case T4_ERR_OPCODE:
                case T4_ERR_INTERNAL_ERR:
                        wc->status = IB_WC_FATAL_ERR;
                        break;
                case T4_ERR_SWFLUSH:
                        wc->status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        printk(KERN_ERR MOD
                               "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
                               CQE_STATUS(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                }
        }
out:
        if (wq)
                spin_unlock(&qhp->lock);
        return ret;
}

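/*
 * Poll up to num_entries completions off the CQ into the wc array,
 * retrying internally whenever a CQE is skipped (-EAGAIN).  Returns the
 * number of entries polled, or a negative errno on a fatal error.
 */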
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct c4iw_cq *chp;
        unsigned long flags;
        int npolled;
        int err = 0;

        chp = to_c4iw_cq(ibcq);

        spin_lock_irqsave(&chp->lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
                do {
                        err = c4iw_poll_cq_one(chp, wc + npolled);
                } while (err == -EAGAIN);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&chp->lock, flags);
        return !err || err == -ENODATA ? npolled : err;
}

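/*
 * Destroy an ib_cq: remove it from the CQID table, wait for all
 * references to drop, then tear down the hardware queue and free the
 * c4iw_cq structure.
 */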
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
        struct c4iw_cq *chp;
        struct c4iw_ucontext *ucontext;

        PDBG("%s ib_cq %p\n", __func__, ib_cq);
        chp = to_c4iw_cq(ib_cq);

        remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
        atomic_dec(&chp->refcnt);
        wait_event(chp->wait, !atomic_read(&chp->refcnt));

        ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
                                  : NULL;
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
        kfree(chp);
        return 0;
}

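/*
 * Create an ib_cq: size the hardware ingress queue (status page,
 * full/empty slack, 16-entry alignment, 2x depth, 64-entry minimum),
 * allocate it via create_cq(), and, for user CQs, hand the queue and
 * GTS register mmap keys back through udata.
 */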
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
                             int vector, struct ib_ucontext *ib_context,
                             struct ib_udata *udata)
{
        struct c4iw_dev *rhp;
        struct c4iw_cq *chp;
        struct c4iw_create_cq_resp uresp;
        struct c4iw_ucontext *ucontext = NULL;
        int ret;
        size_t memsize, hwentries;
        struct c4iw_mm_entry *mm, *mm2;

        PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);

        rhp = to_c4iw_dev(ibdev);

        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return ERR_PTR(-ENOMEM);

        if (ib_context)
                ucontext = to_c4iw_ucontext(ib_context);

        /* account for the status page. */
        entries++;

        /* IQ needs one extra entry to differentiate full vs empty. */
        entries++;

        /*
         * entries must be multiple of 16 for HW.
         */
        entries = roundup(entries, 16);

        /*
         * Make actual HW queue 2x to avoid cidx_inc overflows.
         */
        hwentries = min(entries * 2, T4_MAX_IQ_SIZE);

        /*
         * Make HW queue at least 64 entries so GTS updates aren't too
         * frequent.
         */
        if (hwentries < 64)
                hwentries = 64;

        memsize = hwentries * sizeof *chp->cq.queue;

        /*
         * memsize must be a multiple of the page size if it's a user cq.
         */
        if (ucontext) {
                memsize = roundup(memsize, PAGE_SIZE);
                hwentries = memsize / sizeof *chp->cq.queue;
                while (hwentries > T4_MAX_IQ_SIZE) {
                        memsize -= PAGE_SIZE;
                        hwentries = memsize / sizeof *chp->cq.queue;
                }
        }
        chp->cq.size = hwentries;
        chp->cq.memsize = memsize;

        ret = create_cq(&rhp->rdev, &chp->cq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
        if (ret)
                goto err1;

        chp->rhp = rhp;
        chp->cq.size--;                         /* status page */
        chp->ibcq.cqe = entries - 2;
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
        if (ret)
                goto err2;

        if (ucontext) {
                ret = -ENOMEM;
                mm = kmalloc(sizeof *mm, GFP_KERNEL);
                if (!mm)
                        goto err3;
                mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                if (!mm2)
                        goto err4;

                memset(&uresp, 0, sizeof(uresp));
                uresp.qid_mask = rhp->rdev.cqmask;
                uresp.cqid = chp->cq.cqid;
                uresp.size = chp->cq.size;
                uresp.memsize = chp->cq.memsize;
                spin_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.gts_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
                ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
                if (ret)
                        goto err5;

                mm->key = uresp.key;
                mm->addr = virt_to_phys(chp->cq.queue);
                mm->len = chp->cq.memsize;
                insert_mmap(ucontext, mm);

                mm2->key = uresp.gts_key;
                mm2->addr = chp->cq.ugts;
                mm2->len = PAGE_SIZE;
                insert_mmap(ucontext, mm2);
        }
        PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
             __func__, chp->cq.cqid, chp, chp->cq.size,
             chp->cq.memsize,
             (unsigned long long) chp->cq.dma_addr);
        return &chp->ibcq;
err5:
        kfree(mm2);
err4:
        kfree(mm);
err3:
        remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
        kfree(chp);
        return ERR_PTR(ret);
}

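/* CQ resize is not supported by this driver. */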
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
        return -ENOSYS;
}

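/*
 * Request a completion notification: arm the CQ for the next solicited
 * or any next completion, per 'flags'.  The nonzero return from
 * t4_arm_cq() is passed back only when IB_CQ_REPORT_MISSED_EVENTS is
 * requested; otherwise it is cleared.
 */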
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct c4iw_cq *chp;
        int ret;
        unsigned long flag;

        chp = to_c4iw_cq(ibcq);
        spin_lock_irqsave(&chp->lock, flag);
        ret = t4_arm_cq(&chp->cq,
                        (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
        spin_unlock_irqrestore(&chp->lock, flag);
        if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
                ret = 0;
        return ret;
}