/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $
 */

#include <linux/init.h>
#include <linux/hardirq.h>

#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"

enum {
        MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE
};

enum {
        MTHCA_CQ_ENTRY_SIZE = 0x20
};

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_cq_context {
        __be32 flags;
        __be64 start;
        __be32 logsize_usrpage;
        __be32 error_eqn;       /* Tavor only */
        __be32 comp_eqn;
        __be32 pd;
        __be32 lkey;
        __be32 last_notified_index;
        __be32 solicit_producer_index;
        __be32 consumer_index;
        __be32 producer_index;
        __be32 cqn;
        __be32 ci_db;           /* Arbel only */
        __be32 state_db;        /* Arbel only */
        u32    reserved;
} __attribute__((packed));

#define MTHCA_CQ_STATUS_OK          ( 0 << 28)
#define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_CQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_CQ_FLAG_TR            ( 1 << 18)
#define MTHCA_CQ_FLAG_OI            ( 1 << 17)
#define MTHCA_CQ_STATE_DISARMED     ( 0 << 8)
#define MTHCA_CQ_STATE_ARMED        ( 1 << 8)
#define MTHCA_CQ_STATE_ARMED_SOL    ( 4 << 8)
#define MTHCA_EQ_STATE_FIRED        (10 << 8)

enum {
        MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe
};

enum {
        SYNDROME_LOCAL_LENGTH_ERR        = 0x01,
        SYNDROME_LOCAL_QP_OP_ERR         = 0x02,
        SYNDROME_LOCAL_EEC_OP_ERR        = 0x03,
        SYNDROME_LOCAL_PROT_ERR          = 0x04,
        SYNDROME_WR_FLUSH_ERR            = 0x05,
        SYNDROME_MW_BIND_ERR             = 0x06,
        SYNDROME_BAD_RESP_ERR            = 0x10,
        SYNDROME_LOCAL_ACCESS_ERR        = 0x11,
        SYNDROME_REMOTE_INVAL_REQ_ERR    = 0x12,
        SYNDROME_REMOTE_ACCESS_ERR       = 0x13,
        SYNDROME_REMOTE_OP_ERR           = 0x14,
        SYNDROME_RETRY_EXC_ERR           = 0x15,
        SYNDROME_RNR_RETRY_EXC_ERR       = 0x16,
        SYNDROME_LOCAL_RDD_VIOL_ERR      = 0x20,
        SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,
        SYNDROME_REMOTE_ABORTED_ERR      = 0x22,
        SYNDROME_INVAL_EECN_ERR          = 0x23,
        SYNDROME_INVAL_EEC_STATE_ERR     = 0x24
};

struct mthca_cqe {
        __be32 my_qpn;
        __be32 my_ee;
        __be32 rqpn;
        __be16 sl_g_mlpath;
        __be16 rlid;
        __be32 imm_etype_pkey_eec;
        __be32 byte_cnt;
        __be32 wqe;
        u8     opcode;
        u8     is_send;
        u8     reserved;
        u8     owner;
};

struct mthca_err_cqe {
        __be32 my_qpn;
        u32    reserved1[3];
        u8     syndrome;
        u8     vendor_err;
        __be16 db_cnt;
        u32    reserved2;
        __be32 wqe;
        u8     opcode;
        u8     reserved3[2];
        u8     owner;
};

#define MTHCA_CQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_CQ_ENTRY_OWNER_HW      (1 << 7)

#define MTHCA_TAVOR_CQ_DB_INC_CI       (1 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL  (3 << 24)
#define MTHCA_TAVOR_CQ_DB_SET_CI       (4 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24)

#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL  (1 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)

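/*
 * CQ buffer layout: the CQ is a ring of fixed-size (32-byte) CQEs.
 * Small rings fit in a single contiguous ("direct") buffer; larger
 * ones are spread over a page list, so get_cqe() computes which page
 * an entry lands in.  Ownership of each entry is carried in the top
 * bit of the owner byte: an entry belongs to hardware until HW clears
 * the bit, and the driver hands it back with set_cqe_hw().  Since
 * ibcq.cqe is the ring size minus one (a power-of-two mask),
 * cons_index & cq->ibcq.cqe wraps the consumer index onto the ring.
 */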
static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
{
        if (cq->is_direct)
                return cq->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
        else
                return cq->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
                        + (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
}

static inline struct mthca_cqe *cqe_sw(struct mthca_cq *cq, int i)
{
        struct mthca_cqe *cqe = get_cqe(cq, i);
        return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
}

static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
{
        return cqe_sw(cq, cq->cons_index & cq->ibcq.cqe);
}

static inline void set_cqe_hw(struct mthca_cqe *cqe)
{
        cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
}

static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
{
        __be32 *cqe = cqe_ptr;

        (void) cqe;     /* avoid warning if mthca_dbg compiled away... */
        mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
                  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),
                  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),
                  be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));
}

/*
 * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
 * should be correct before calling update_cons_index().
 */
static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
                                     int incr)
{
        __be32 doorbell[2];

        if (mthca_is_memfree(dev)) {
                *cq->set_ci_db = cpu_to_be32(cq->cons_index);
                wmb();
        } else {
                doorbell[0] = cpu_to_be32(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn);
                doorbell[1] = cpu_to_be32(incr - 1);

                mthca_write64(doorbell,
                              dev->kar + MTHCA_CQ_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }
}

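/*
 * Event dispatch: mthca_cq_completion() runs when a completion event
 * is reported for a CQ and mthca_cq_event() when an async (error)
 * event names a CQ; both are called from the EQ handling code and
 * look the CQ up by number in the cq_table.
 */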
void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)
{
        struct mthca_cq *cq;

        cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));

        if (!cq) {
                mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
                return;
        }

        ++cq->arm_sn;

        cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
                    enum ib_event_type event_type)
{
        struct mthca_cq *cq;
        struct ib_event event;

        spin_lock(&dev->cq_table.lock);

        cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));

        if (cq)
                atomic_inc(&cq->refcount);
        spin_unlock(&dev->cq_table.lock);

        if (!cq) {
                mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn);
                return;
        }

        event.device     = &dev->ib_dev;
        event.event      = event_type;
        event.element.cq = &cq->ibcq;
        if (cq->ibcq.event_handler)
                cq->ibcq.event_handler(&event, cq->ibcq.cq_context);

        if (atomic_dec_and_test(&cq->refcount))
                wake_up(&cq->wait);
}

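/*
 * Send vs. receive is encoded differently for error CQEs: there the
 * low bit of the opcode distinguishes the two, while successful
 * completions use the high bit of is_send.
 */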
static inline int is_recv_cqe(struct mthca_cqe *cqe)
{
        if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
            MTHCA_ERROR_CQE_OPCODE_MASK)
                return !(cqe->opcode & 0x01);
        else
                return !(cqe->is_send & 0x80);
}

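/*
 * mthca_cq_clean() removes all CQEs belonging to one QP (which is
 * already in RESET) from a CQ: matching entries are eliminated by
 * copying the remaining older entries on top of them, the consumer
 * index is advanced past the freed slots, and receive entries that
 * came from an SRQ have their WQEs returned to the SRQ.
 */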
void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
                    struct mthca_srq *srq)
{
        struct mthca_cq *cq;
        struct mthca_cqe *cqe;
        u32 prod_index;
        int nfreed = 0;

        spin_lock_irq(&dev->cq_table.lock);
        cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
        if (cq)
                atomic_inc(&cq->refcount);
        spin_unlock_irq(&dev->cq_table.lock);

        if (!cq)
                return;

        spin_lock_irq(&cq->lock);

        /*
         * First we need to find the current producer index, so we
         * know where to start cleaning from.  It doesn't matter if HW
         * adds new entries after this loop -- the QP we're worried
         * about is already in RESET, so the new entries won't come
         * from our QP and therefore don't need to be checked.
         */
        for (prod_index = cq->cons_index;
             cqe_sw(cq, prod_index & cq->ibcq.cqe);
             ++prod_index)
                if (prod_index == cq->cons_index + cq->ibcq.cqe)
                        break;

        if (0)
                mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
                          qpn, cqn, cq->cons_index, prod_index);

        /*
         * Now sweep backwards through the CQ, removing CQ entries
         * that match our QP by copying older entries on top of them.
         */
        while ((int) --prod_index - (int) cq->cons_index >= 0) {
                cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
                if (cqe->my_qpn == cpu_to_be32(qpn)) {
                        if (srq && is_recv_cqe(cqe))
                                mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
                        ++nfreed;
                } else if (nfreed)
                        memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
                               cqe, MTHCA_CQ_ENTRY_SIZE);
        }

        if (nfreed) {
                wmb();
                cq->cons_index += nfreed;
                update_cons_index(dev, cq, nfreed);
        }

        spin_unlock_irq(&cq->lock);
        if (atomic_dec_and_test(&cq->refcount))
                wake_up(&cq->wait);
}

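/*
 * handle_error_cqe() maps the hardware error syndrome to an ib_wc
 * status.  On Tavor one error CQE can cover several WQEs (tracked by
 * the doorbell count), so rather than being freed immediately the CQE
 * may be rewritten as a flush error and left in place for the next
 * poll; *free_cqe tells the caller which case applies.
 */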
static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
                            struct mthca_qp *qp, int wqe_index, int is_send,
                            struct mthca_err_cqe *cqe,
                            struct ib_wc *entry, int *free_cqe)
{
        int err;
        int dbd;
        __be32 new_wqe;

        if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
                mthca_dbg(dev, "local QP operation err "
                          "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",
                          be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
                          cq->cqn, cq->cons_index);
                dump_cqe(dev, cqe);
        }

        /*
         * For completions in error, only work request ID, status, vendor error
         * (and freed resource count for RD) have to be set.
         */
        switch (cqe->syndrome) {
        case SYNDROME_LOCAL_LENGTH_ERR:
                entry->status = IB_WC_LOC_LEN_ERR;
                break;
        case SYNDROME_LOCAL_QP_OP_ERR:
                entry->status = IB_WC_LOC_QP_OP_ERR;
                break;
        case SYNDROME_LOCAL_EEC_OP_ERR:
                entry->status = IB_WC_LOC_EEC_OP_ERR;
                break;
        case SYNDROME_LOCAL_PROT_ERR:
                entry->status = IB_WC_LOC_PROT_ERR;
                break;
        case SYNDROME_WR_FLUSH_ERR:
                entry->status = IB_WC_WR_FLUSH_ERR;
                break;
        case SYNDROME_MW_BIND_ERR:
                entry->status = IB_WC_MW_BIND_ERR;
                break;
        case SYNDROME_BAD_RESP_ERR:
                entry->status = IB_WC_BAD_RESP_ERR;
                break;
        case SYNDROME_LOCAL_ACCESS_ERR:
                entry->status = IB_WC_LOC_ACCESS_ERR;
                break;
        case SYNDROME_REMOTE_INVAL_REQ_ERR:
                entry->status = IB_WC_REM_INV_REQ_ERR;
                break;
        case SYNDROME_REMOTE_ACCESS_ERR:
                entry->status = IB_WC_REM_ACCESS_ERR;
                break;
        case SYNDROME_REMOTE_OP_ERR:
                entry->status = IB_WC_REM_OP_ERR;
                break;
        case SYNDROME_RETRY_EXC_ERR:
                entry->status = IB_WC_RETRY_EXC_ERR;
                break;
        case SYNDROME_RNR_RETRY_EXC_ERR:
                entry->status = IB_WC_RNR_RETRY_EXC_ERR;
                break;
        case SYNDROME_LOCAL_RDD_VIOL_ERR:
                entry->status = IB_WC_LOC_RDD_VIOL_ERR;
                break;
        case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
                entry->status = IB_WC_REM_INV_RD_REQ_ERR;
                break;
        case SYNDROME_REMOTE_ABORTED_ERR:
                entry->status = IB_WC_REM_ABORT_ERR;
                break;
        case SYNDROME_INVAL_EECN_ERR:
                entry->status = IB_WC_INV_EECN_ERR;
                break;
        case SYNDROME_INVAL_EEC_STATE_ERR:
                entry->status = IB_WC_INV_EEC_STATE_ERR;
                break;
        default:
                entry->status = IB_WC_GENERAL_ERR;
                break;
        }

        entry->vendor_err = cqe->vendor_err;

        /*
         * Mem-free HCAs always generate one CQE per WQE, even in the
         * error case, so we don't have to check the doorbell count, etc.
         */
        if (mthca_is_memfree(dev))
                return 0;

        err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
        if (err)
                return err;

        /*
         * If we're at the end of the WQE chain, or we've used up our
         * doorbell count, free the CQE.  Otherwise just update it for
         * the next poll operation.
         */
        if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
                return 0;

        cqe->db_cnt   = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
        cqe->wqe      = new_wqe;
        cqe->syndrome = SYNDROME_WR_FLUSH_ERR;

        *free_cqe = 0;

        return 0;
}

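/*
 * mthca_poll_one() consumes a single CQE: it looks up the owning QP
 * (cached in *cur_qp across calls within one mthca_poll_cq()), works
 * out the WQE index and work request ID, advances the work queue
 * tail, and fills in the ib_wc entry.  *freed counts CQEs handed back
 * to hardware so the caller can ring the set-CI doorbell once at the
 * end instead of once per entry.
 */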
static inline int mthca_poll_one(struct mthca_dev *dev,
                                 struct mthca_cq *cq,
                                 struct mthca_qp **cur_qp,
                                 int *freed,
                                 struct ib_wc *entry)
{
        struct mthca_wq *wq;
        struct mthca_cqe *cqe;
        int wqe_index;
        int is_error;
        int is_send;
        int free_cqe = 1;
        int err = 0;

        cqe = next_cqe_sw(cq);
        if (!cqe)
                return -EAGAIN;

        /*
         * Make sure we read CQ entry contents after we've checked the
         * ownership bit.
         */
        rmb();

        if (0) {
                mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
                          cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
                          be32_to_cpu(cqe->wqe));
                dump_cqe(dev, cqe);
        }

        is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
                MTHCA_ERROR_CQE_OPCODE_MASK;
        is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;

        if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) {
                /*
                 * We do not have to take the QP table lock here,
                 * because CQs will be locked while QPs are removed
                 * from the table.
                 */
                *cur_qp = mthca_array_get(&dev->qp_table.qp,
                                          be32_to_cpu(cqe->my_qpn) &
                                          (dev->limits.num_qps - 1));
                if (!*cur_qp) {
                        mthca_warn(dev, "CQ entry for unknown QP %06x\n",
                                   be32_to_cpu(cqe->my_qpn) & 0xffffff);
                        err = -EINVAL;
                        goto out;
                }
        }

        entry->qp_num = (*cur_qp)->qpn;

        if (is_send) {
                wq = &(*cur_qp)->sq;
                wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
                             >> wq->wqe_shift);
                entry->wr_id = (*cur_qp)->wrid[wqe_index +
                                               (*cur_qp)->rq.max];
        } else if ((*cur_qp)->ibqp.srq) {
                struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
                u32 wqe = be32_to_cpu(cqe->wqe);
                wq = NULL;
                wqe_index = wqe >> srq->wqe_shift;
                entry->wr_id = srq->wrid[wqe_index];
                mthca_free_srq_wqe(srq, wqe);
        } else {
                wq = &(*cur_qp)->rq;
                wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
                entry->wr_id = (*cur_qp)->wrid[wqe_index];
        }

        if (wq) {
                if (wq->last_comp < wqe_index)
                        wq->tail += wqe_index - wq->last_comp;
                else
                        wq->tail += wqe_index + wq->max - wq->last_comp;

                wq->last_comp = wqe_index;
        }

        if (is_error) {
                err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
                                       (struct mthca_err_cqe *) cqe,
                                       entry, &free_cqe);
                goto out;
        }

        if (is_send) {
                entry->wc_flags = 0;
                switch (cqe->opcode) {
                case MTHCA_OPCODE_RDMA_WRITE:
                        entry->opcode = IB_WC_RDMA_WRITE;
                        break;
                case MTHCA_OPCODE_RDMA_WRITE_IMM:
                        entry->opcode = IB_WC_RDMA_WRITE;
                        entry->wc_flags |= IB_WC_WITH_IMM;
                        break;
                case MTHCA_OPCODE_SEND:
                        entry->opcode = IB_WC_SEND;
                        break;
                case MTHCA_OPCODE_SEND_IMM:
                        entry->opcode = IB_WC_SEND;
                        entry->wc_flags |= IB_WC_WITH_IMM;
                        break;
                case MTHCA_OPCODE_RDMA_READ:
                        entry->opcode = IB_WC_RDMA_READ;
                        entry->byte_len = be32_to_cpu(cqe->byte_cnt);
                        break;
                case MTHCA_OPCODE_ATOMIC_CS:
                        entry->opcode = IB_WC_COMP_SWAP;
                        entry->byte_len = be32_to_cpu(cqe->byte_cnt);
                        break;
                case MTHCA_OPCODE_ATOMIC_FA:
                        entry->opcode = IB_WC_FETCH_ADD;
                        entry->byte_len = be32_to_cpu(cqe->byte_cnt);
                        break;
                case MTHCA_OPCODE_BIND_MW:
                        entry->opcode = IB_WC_BIND_MW;
                        break;
                default:
                        entry->opcode = MTHCA_OPCODE_INVALID;
                        break;
                }
        } else {
                entry->byte_len = be32_to_cpu(cqe->byte_cnt);
                switch (cqe->opcode & 0x1f) {
                case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
                case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
                        entry->wc_flags = IB_WC_WITH_IMM;
                        entry->imm_data = cqe->imm_etype_pkey_eec;
                        entry->opcode = IB_WC_RECV;
                        break;
                case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
                        entry->wc_flags = IB_WC_WITH_IMM;
                        entry->imm_data = cqe->imm_etype_pkey_eec;
                        entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
                        break;
                default:
                        entry->wc_flags = 0;
                        entry->opcode = IB_WC_RECV;
                        break;
                }
                entry->slid = be16_to_cpu(cqe->rlid);
                entry->sl = be16_to_cpu(cqe->sl_g_mlpath) >> 12;
                entry->src_qp = be32_to_cpu(cqe->rqpn) & 0xffffff;
                entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f;
                entry->pkey_index = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
                entry->wc_flags |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ?
                                        IB_WC_GRH : 0;
        }

        entry->status = IB_WC_SUCCESS;

 out:
        if (likely(free_cqe)) {
                set_cqe_hw(cqe);
                ++(*freed);
                ++cq->cons_index;
        }

        return err;
}

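/*
 * mthca_poll_cq() is the ib_poll_cq entry point: it drains up to
 * num_entries completions under the CQ lock and updates the consumer
 * index once for everything it freed rather than once per CQE.
 */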
int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
                  struct ib_wc *entry)
{
        struct mthca_dev *dev = to_mdev(ibcq->device);
        struct mthca_cq *cq = to_mcq(ibcq);
        struct mthca_qp *qp = NULL;
        unsigned long flags;
        int err = 0;
        int freed = 0;
        int npolled;

        spin_lock_irqsave(&cq->lock, flags);

        for (npolled = 0; npolled < num_entries; ++npolled) {
                err = mthca_poll_one(dev, cq, &qp,
                                     &freed, entry + npolled);
                if (err)
                        break;
        }

        if (freed) {
                wmb();
                update_cons_index(dev, cq, freed);
        }

        spin_unlock_irqrestore(&cq->lock, flags);

        return err == 0 || err == -EAGAIN ? npolled : err;
}

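/*
 * CQ arming.  On Tavor a single 64-bit doorbell write requests the
 * next solicited or unsolicited completion event.  On Arbel
 * (mem-free) the consumer index and arm state are first written to
 * the doorbell record in host memory and only then is the MMIO
 * doorbell rung; the low two bits of arm_sn, which is bumped on every
 * completion event, are folded into both writes, presumably so that a
 * stale arm request can be told apart from a fresh one.
 */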
int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
{
        __be32 doorbell[2];

        doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ?
                                   MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
                                   MTHCA_TAVOR_CQ_DB_REQ_NOT) |
                                  to_mcq(cq)->cqn);
        doorbell[1] = (__force __be32) 0xffffffff;

        mthca_write64(doorbell,
                      to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));

        return 0;
}

int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
        struct mthca_cq *cq = to_mcq(ibcq);
        __be32 doorbell[2];
        u32 sn;
        __be32 ci;

        sn = cq->arm_sn & 3;
        ci = cpu_to_be32(cq->cons_index);

        doorbell[0] = ci;
        doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
                                  (notify == IB_CQ_SOLICITED ? 1 : 2));

        mthca_write_db_rec(doorbell, cq->arm_db);

        /*
         * Make sure that the doorbell record in host memory is
         * written before ringing the doorbell via PCI MMIO.
         */
        wmb();

        doorbell[0] = cpu_to_be32((sn << 28) |
                                  (notify == IB_CQ_SOLICITED ?
                                   MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
                                   MTHCA_ARBEL_CQ_DB_REQ_NOT) |
                                  cq->cqn);
        doorbell[1] = ci;

        mthca_write64(doorbell,
                      to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock));

        return 0;
}

static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
{
        mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
                       &cq->queue, cq->is_direct, &cq->mr);
}

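/*
 * mthca_init_cq() creates a CQ with nent entries; the code assumes
 * nent is a power of two (the log size passed to firmware is
 * ffs(nent) - 1 and ibcq.cqe = nent - 1 doubles as the index mask).
 * For kernel CQs the CQE buffer and, on mem-free HCAs, the set-CI and
 * arm doorbell records are allocated here; for userspace CQs the
 * caller is expected to have set up the buffer and doorbell indices
 * before calling in.
 */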
int mthca_init_cq(struct mthca_dev *dev, int nent,
                  struct mthca_ucontext *ctx, u32 pdn,
                  struct mthca_cq *cq)
{
        int size = nent * MTHCA_CQ_ENTRY_SIZE;
        struct mthca_mailbox *mailbox;
        struct mthca_cq_context *cq_context;
        int err = -ENOMEM;
        u8 status;
        int i;

        might_sleep();

        cq->ibcq.cqe  = nent - 1;
        cq->is_kernel = !ctx;

        cq->cqn = mthca_alloc(&dev->cq_table.alloc);
        if (cq->cqn == -1)
                return -ENOMEM;

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
                if (err)
                        goto err_out;

                if (cq->is_kernel) {
                        cq->arm_sn = 1;

                        err = -ENOMEM;

                        cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,
                                                             cq->cqn, &cq->set_ci_db);
                        if (cq->set_ci_db_index < 0)
                                goto err_out_icm;

                        cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,
                                                          cq->cqn, &cq->arm_db);
                        if (cq->arm_db_index < 0)
                                goto err_out_ci;
                }
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                goto err_out_arm;

        cq_context = mailbox->buf;

        if (cq->is_kernel) {
                err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE,
                                      &cq->queue, &cq->is_direct,
                                      &dev->driver_pd, 1, &cq->mr);
                if (err)
                        goto err_out_mailbox;

                for (i = 0; i < nent; ++i)
                        set_cqe_hw(get_cqe(cq, i));
        }

        spin_lock_init(&cq->lock);
        atomic_set(&cq->refcount, 1);
        init_waitqueue_head(&cq->wait);

        memset(cq_context, 0, sizeof *cq_context);
        cq_context->flags           = cpu_to_be32(MTHCA_CQ_STATUS_OK      |
                                                  MTHCA_CQ_STATE_DISARMED |
                                                  MTHCA_CQ_FLAG_TR);
        cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
        if (ctx)
                cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index);
        else
                cq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
        cq_context->error_eqn       = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
        cq_context->comp_eqn        = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
        cq_context->pd              = cpu_to_be32(pdn);
        cq_context->lkey            = cpu_to_be32(cq->mr.ibmr.lkey);
        cq_context->cqn             = cpu_to_be32(cq->cqn);

        if (mthca_is_memfree(dev)) {
                cq_context->ci_db    = cpu_to_be32(cq->set_ci_db_index);
                cq_context->state_db = cpu_to_be32(cq->arm_db_index);
        }

        err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
        if (err) {
                mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
                goto err_out_free_mr;
        }

        if (status) {
                mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
                           status);
                err = -EINVAL;
                goto err_out_free_mr;
        }

        spin_lock_irq(&dev->cq_table.lock);
        if (mthca_array_set(&dev->cq_table.cq,
                            cq->cqn & (dev->limits.num_cqs - 1),
                            cq)) {
                spin_unlock_irq(&dev->cq_table.lock);
                goto err_out_free_mr;
        }
        spin_unlock_irq(&dev->cq_table.lock);

        cq->cons_index = 0;

        mthca_free_mailbox(dev, mailbox);

        return 0;

err_out_free_mr:
        if (cq->is_kernel)
                mthca_free_cq_buf(dev, cq);

err_out_mailbox:
        mthca_free_mailbox(dev, mailbox);

err_out_arm:
        if (cq->is_kernel && mthca_is_memfree(dev))
                mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);

err_out_ci:
        if (cq->is_kernel && mthca_is_memfree(dev))
                mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);

err_out_icm:
        mthca_table_put(dev, dev->cq_table.table, cq->cqn);

err_out:
        mthca_free(&dev->cq_table.alloc, cq->cqn);

        return err;
}

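/*
 * mthca_free_cq() tears a CQ down in the reverse order: the CQ is
 * handed back to software ownership with HW2SW_CQ, removed from the
 * cq_table so no new events can find it, the completion interrupt is
 * synchronized, and only once the last reference has been dropped are
 * the buffer, doorbell records, ICM table entry and CQ number freed.
 */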
void mthca_free_cq(struct mthca_dev *dev,
                   struct mthca_cq *cq)
{
        struct mthca_mailbox *mailbox;
        int err;
        u8 status;

        might_sleep();

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                mthca_warn(dev, "No memory for mailbox to free CQ.\n");
                return;
        }

        err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
        if (err)
                mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
        else if (status)
                mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);

        if (0) {
                __be32 *ctx = mailbox->buf;
                int j;

                printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
                       cq->cqn, cq->cons_index,
                       cq->is_kernel ? !!next_cqe_sw(cq) : 0);
                for (j = 0; j < 16; ++j)
                        printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
        }

        spin_lock_irq(&dev->cq_table.lock);
        mthca_array_clear(&dev->cq_table.cq,
                          cq->cqn & (dev->limits.num_cqs - 1));
        spin_unlock_irq(&dev->cq_table.lock);

        if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
                synchronize_irq(dev->eq_table.eq[MTHCA_EQ_COMP].msi_x_vector);
        else
                synchronize_irq(dev->pdev->irq);

        atomic_dec(&cq->refcount);
        wait_event(cq->wait, !atomic_read(&cq->refcount));

        if (cq->is_kernel) {
                mthca_free_cq_buf(dev, cq);
                if (mthca_is_memfree(dev)) {
                        mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
                        mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
                }
        }

        mthca_table_put(dev, dev->cq_table.table, cq->cqn);
        mthca_free(&dev->cq_table.alloc, cq->cqn);
        mthca_free_mailbox(dev, mailbox);
}

int __devinit mthca_init_cq_table(struct mthca_dev *dev)
{
        int err;

        spin_lock_init(&dev->cq_table.lock);

        err = mthca_alloc_init(&dev->cq_table.alloc,
                               dev->limits.num_cqs,
                               (1 << 24) - 1,
                               dev->limits.reserved_cqs);
        if (err)
                return err;

        err = mthca_array_init(&dev->cq_table.cq,
                               dev->limits.num_cqs);
        if (err)
                mthca_alloc_cleanup(&dev->cq_table.alloc);

        return err;
}

void __devexit mthca_cleanup_cq_table(struct mthca_dev *dev)
{
        mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
        mthca_alloc_cleanup(&dev->cq_table.alloc);
}