/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"

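/*
 * Free a CQ created by create_cq() below: post a FW_RI_RES_WR work
 * request with op RESET for the CQ's IQ ID, wait for the firmware
 * reply, then release the host-side resources (software queue, DMA
 * memory, and the cqid itself).
 */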
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                      struct c4iw_dev_ucontext *uctx)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;
        int ret;

        wr_len = sizeof *res_wr + sizeof *res;
        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)&wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_RESET;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);

        c4iw_init_wr_wait(&wr_wait);
        ret = c4iw_ofld_send(rdev, skb);
        if (!ret)
                ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

        kfree(cq->sw_queue);
        dma_free_coherent(&rdev->lldi.pdev->dev,
                          cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
        c4iw_put_cqid(rdev, cq->cqid, uctx);
        return ret;
}

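/*
 * Allocate a CQ: reserve a cqid, allocate the queue memory (plus a
 * kernel software queue for kernel-mode CQs), and post a FW_RI_RES_WR
 * with op WRITE so the firmware programs the ingress queue.  On any
 * failure, unwind in reverse order via the err labels.
 */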
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                     struct c4iw_dev_ucontext *uctx)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        int user = (uctx != &rdev->uctx);
        struct c4iw_wr_wait wr_wait;
        int ret;
        struct sk_buff *skb;

        cq->cqid = c4iw_get_cqid(rdev, uctx);
        if (!cq->cqid) {
                ret = -ENOMEM;
                goto err1;
        }

        if (!user) {
                cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
                if (!cq->sw_queue) {
                        ret = -ENOMEM;
                        goto err2;
                }
        }
        cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
                                       &cq->dma_addr, GFP_KERNEL);
        if (!cq->queue) {
                ret = -ENOMEM;
                goto err3;
        }
        dma_unmap_addr_set(cq, mapping, cq->dma_addr);
        memset(cq->queue, 0, cq->memsize);

        /* build fw_ri_res_wr */
        wr_len = sizeof *res_wr + sizeof *res;

        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb) {
                ret = -ENOMEM;
                goto err4;
        }
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)&wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_WRITE;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);
        res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
                        FW_RI_RES_WR_IQANUS_V(0) |
                        FW_RI_RES_WR_IQANUD_V(1) |
                        FW_RI_RES_WR_IQANDST_F |
                        FW_RI_RES_WR_IQANDSTINDEX_V(
                                rdev->lldi.ciq_ids[cq->vector]));
        res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
                        FW_RI_RES_WR_IQDROPRSS_F |
                        FW_RI_RES_WR_IQPCIECH_V(2) |
                        FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
                        FW_RI_RES_WR_IQO_F |
                        FW_RI_RES_WR_IQESIZE_V(1));
        res->u.cq.iqsize = cpu_to_be16(cq->size);
        res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

        c4iw_init_wr_wait(&wr_wait);

        ret = c4iw_ofld_send(rdev, skb);
        if (ret)
                goto err4;
        PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        if (ret)
                goto err4;

        cq->gen = 1;
        cq->gts = rdev->lldi.gts_reg;
        cq->rdev = rdev;
        if (user) {
                cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
                           (cq->cqid << rdev->cqshift);
                cq->ugts &= PAGE_MASK;
        }
        return 0;
err4:
        dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
err3:
        kfree(cq->sw_queue);
err2:
        c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
        return ret;
}

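/*
 * Insert a software-generated flush CQE (status T4_ERR_SWFLUSH) into
 * the software CQ for one outstanding RQ WR.  These CQEs carry the
 * CQE_SWCQE bit so the poll path knows they came from the SW queue
 * rather than the hardware CQ.
 */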
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_cqe cqe;

        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
                                 CQE_OPCODE_V(FW_RI_SEND) |
                                 CQE_TYPE_V(0) |
                                 CQE_SWCQE_V(1) |
                                 CQE_QPID_V(wq->sq.qid));
        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
        int flushed = 0;
        int in_use = wq->rq.in_use - count;

        BUG_ON(in_use < 0);
        PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
             wq, cq, wq->rq.in_use, count);
        while (in_use--) {
                insert_recv_cqe(wq, cq);
                flushed++;
        }
        return flushed;
}

static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
                          struct t4_swsqe *swcqe)
{
        struct t4_cqe cqe;

        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
                                 CQE_OPCODE_V(swcqe->opcode) |
                                 CQE_TYPE_V(1) |
                                 CQE_SWCQE_V(1) |
                                 CQE_QPID_V(wq->sq.qid));
        CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

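/*
 * Flush every SQ WR between sq.flush_cidx and sq.pidx into the
 * software CQ as T4_ERR_SWFLUSH completions.  flush_cidx records how
 * far a previous flush got, so repeated flushes never insert a CQE
 * for the same WR twice.
 */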
int c4iw_flush_sq(struct c4iw_qp *qhp)
{
        int flushed = 0;
        struct t4_wq *wq = &qhp->wq;
        struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
        struct t4_cq *cq = &chp->cq;
        int idx;
        struct t4_swsqe *swsqe;

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        idx = wq->sq.flush_cidx;
        BUG_ON(idx >= wq->sq.size);
        while (idx != wq->sq.pidx) {
                swsqe = &wq->sq.sw_sq[idx];
                BUG_ON(swsqe->flushed);
                swsqe->flushed = 1;
                insert_sq_cqe(wq, cq, swsqe);
                if (wq->sq.oldest_read == swsqe) {
                        BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
                        advance_oldest_read(wq);
                }
                flushed++;
                if (++idx == wq->sq.size)
                        idx = 0;
        }
        wq->sq.flush_cidx += flushed;
        if (wq->sq.flush_cidx >= wq->sq.size)
                wq->sq.flush_cidx -= wq->sq.size;
        return flushed;
}

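/*
 * Walk the SW SQ from flush_cidx and move any completions that are
 * now in-order into the software CQ.  Unsignaled WRs are skipped;
 * the walk stops at the first signaled WR whose completion has not
 * arrived yet.
 */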
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_swsqe *swsqe;
        int cidx;

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        cidx = wq->sq.flush_cidx;
        BUG_ON(cidx > wq->sq.size);

        while (cidx != wq->sq.pidx) {
                swsqe = &wq->sq.sw_sq[cidx];
                if (!swsqe->signaled) {
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                } else if (swsqe->complete) {

                        BUG_ON(swsqe->flushed);

                        /*
                         * Insert this completed cqe into the swcq.
                         */
                        PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
                             __func__, cidx, cq->sw_pidx);
                        swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
                        cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
                        t4_swcq_produce(cq);
                        swsqe->flushed = 1;
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                        wq->sq.flush_cidx = cidx;
                } else
                        break;
        }
}

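/*
 * Hardware reports an RDMA READ as an RQ-type READ_RESP CQE that
 * lacks the SQ index, opcode, and length of the original READ_REQ
 * WR.  Build a synthetic SQ-type CQE in local memory from the oldest
 * outstanding read so the poll path can complete it like any other
 * SQ work request.
 */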
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
                                struct t4_cqe *read_cqe)
{
        read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
        read_cqe->len = htonl(wq->sq.oldest_read->read_len);
        read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
                                 CQE_SWCQE_V(SW_CQE(hw_cqe)) |
                                 CQE_OPCODE_V(FW_RI_READ_REQ) |
                                 CQE_TYPE_V(1));
        read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

static void advance_oldest_read(struct t4_wq *wq)
{
        u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

        if (rptr == wq->sq.size)
                rptr = 0;
        while (rptr != wq->sq.pidx) {
                wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

                if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
                        return;
                if (++rptr == wq->sq.size)
                        rptr = 0;
        }
        wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order completions and with completions that
 * complete prior unsignaled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
        struct t4_cqe *hw_cqe, *swcqe, read_cqe;
        struct c4iw_qp *qhp;
        struct t4_swsqe *swsqe;
        int ret;

        PDBG("%s cqid 0x%x\n", __func__, chp->cq.cqid);
        ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

        /*
         * This logic is similar to poll_cq(), but not quite the same
         * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
         * also do any translation magic that poll_cq() normally does.
         */
        while (!ret) {
                qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

                /*
                 * drop CQEs with no associated QP
                 */
                if (qhp == NULL)
                        goto next_cqe;

                if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
                        goto next_cqe;

                if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

                        /* If we have reached here because of async
                         * event or other error, and have egress error
                         * then drop
                         */
                        if (CQE_TYPE(hw_cqe) == 1)
                                goto next_cqe;

                        /* drop peer2peer RTR reads.
                         */
                        if (CQE_WRID_STAG(hw_cqe) == 1)
                                goto next_cqe;

                        /*
                         * Eat completions for unsignaled read WRs.
                         */
                        if (!qhp->wq.sq.oldest_read->signaled) {
                                advance_oldest_read(&qhp->wq);
                                goto next_cqe;
                        }

                        /*
                         * Don't write to the HWCQ, create a new read req CQE
                         * in local memory and move it into the swcq.
                         */
                        create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
                        hw_cqe = &read_cqe;
                        advance_oldest_read(&qhp->wq);
                }

                /* if it's a SQ completion, then do the magic to move all the
                 * unsignaled and now in-order completions into the swcq.
                 */
                if (SQ_TYPE(hw_cqe)) {
                        swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                        swsqe->cqe = *hw_cqe;
                        swsqe->complete = 1;
                        flush_completed_wrs(&qhp->wq, &chp->cq);
                } else {
                        swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
                        *swcqe = *hw_cqe;
                        swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
                        t4_swcq_produce(&chp->cq);
                }
next_cqe:
                t4_hwcq_consume(&chp->cq);
                ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
        }
}

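/*
 * Decide whether a CQE actually consumes a user-visible WR:
 * TERMINATE CQEs, RQ-type RDMA_WRITE CQEs, SQ-type READ_RESP CQEs,
 * and SEND CQEs that arrive on an already-empty RQ do not.
 */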
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
        if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
                return 0;

        if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
                return 0;
        return 1;
}

void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
        struct t4_cqe *cqe;
        u32 ptr;

        *count = 0;
        PDBG("%s count zero %d\n", __func__, *count);
        ptr = cq->sw_cidx;
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
                    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
        }
        PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0            CQE returned ok.
 *     -EAGAIN      CQE skipped, try again.
 *     -EOVERFLOW   CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
        int ret = 0;
        struct t4_cqe *hw_cqe, read_cqe;

        *cqe_flushed = 0;
        *credit = 0;
        ret = t4_next_cqe(cq, &hw_cqe);
        if (ret)
                return ret;

        PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
             " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
             __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
             CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
             CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
             CQE_WRID_LOW(hw_cqe));

        /*
         * skip CQEs not affiliated with a QP.
         */
        if (wq == NULL) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip HW CQEs if the wq is already flushed.
         */
        if (wq->flushed && !SW_CQE(hw_cqe)) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip TERMINATE cqes...
         */
        if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
         *      2) opcode not reflected from the wr.
         *      3) read_len not reflected from the wr.
         *      4) cq_type is RQ_TYPE not SQ_TYPE.
         */
        if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

                /* If we have reached here because of async
                 * event or other error, and have egress error
                 * then drop
                 */
                if (CQE_TYPE(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /* If this is an unsolicited read response, then the read
                 * was generated by the kernel driver as part of peer-2-peer
                 * connection setup.  So ignore the completion.
                 */
                if (CQE_WRID_STAG(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Eat completions for unsignaled read WRs.
                 */
                if (!wq->sq.oldest_read->signaled) {
                        advance_oldest_read(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Don't write to the HWCQ, so create a new read req CQE
                 * in local memory.
                 */
                create_read_req_cqe(wq, hw_cqe, &read_cqe);
                hw_cqe = &read_cqe;
                advance_oldest_read(wq);
        }

        if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
                *cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
                t4_set_wq_in_error(wq);
        }

        /*
         * RECV completion.
         */
        if (RQ_TYPE(hw_cqe)) {

                /*
                 * HW only validates 4 bits of MSN.  So we must validate that
                 * the MSN in the SEND is the next expected MSN.  If it's not,
                 * then we complete this with T4_ERR_MSN and mark the wq in
                 * error.
                 */

                if (t4_rq_empty(wq)) {
                        t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }
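
                /*
                 * Example of the 4-bit aliasing noted above: if the next
                 * expected MSN is 0x105, hardware compares only the low
                 * nibble (0x5), so a stale SEND carrying MSN 0x115 would
                 * pass the HW check.  Comparing the full CQE MSN against
                 * wq->rq.msn here catches that case.
                 */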
                if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
                        t4_set_wq_in_error(wq);
                        hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
                        goto proc_cqe;
                }
                goto proc_cqe;
        }

        /*
         * If we get here it's a send completion.
         *
         * Handle out of order completion. These get stuffed
         * in the SW SQ. Then the SW SQ is walked to move any
         * now in-order completions into the SW CQ. This handles
         * 2 cases:
         *      1) reaping unsignaled WRs when the first subsequent
         *         signaled WR is completed.
         *      2) out of order read completions.
         */
        if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
                struct t4_swsqe *swsqe;

                PDBG("%s out of order completion going in sw_sq at idx %u\n",
                     __func__, CQE_WRID_SQ_IDX(hw_cqe));
                swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                swsqe->cqe = *hw_cqe;
                swsqe->complete = 1;
                ret = -EAGAIN;
                goto flush_wq;
        }

proc_cqe:
        *cqe = *hw_cqe;

        /*
         * Reap the associated WR(s) that are freed up with this
         * completion.
         */
        if (SQ_TYPE(hw_cqe)) {
                int idx = CQE_WRID_SQ_IDX(hw_cqe);

                BUG_ON(idx >= wq->sq.size);

                /*
                 * Account for any unsignaled completions completed by
                 * this signaled completion.  In this case, cidx points
                 * to the first unsignaled one, and idx points to the
                 * signaled one.  So adjust in_use based on this delta.
                 * If this is not completing any unsignaled WRs, then
                 * the delta will be 0.  Handle wrapping also!
                 */
                if (idx < wq->sq.cidx)
                        wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
                else
                        wq->sq.in_use -= idx - wq->sq.cidx;
                BUG_ON(wq->sq.in_use <= 0 || wq->sq.in_use >= wq->sq.size);

                wq->sq.cidx = (uint16_t)idx;
                PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
                *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
                if (c4iw_wr_log)
                        c4iw_log_wr_stats(wq, hw_cqe);
                t4_sq_consume(wq);
        } else {
                PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
                *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
                BUG_ON(t4_rq_empty(wq));
                if (c4iw_wr_log)
                        c4iw_log_wr_stats(wq, hw_cqe);
                t4_rq_consume(wq);
                goto skip_cqe;
        }

flush_wq:
        /*
         * Flush any completed cqes that are now in-order.
         */
        flush_completed_wrs(wq, cq);

skip_cqe:
        if (SW_CQE(hw_cqe)) {
                PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
                     __func__, cq, cq->cqid, cq->sw_cidx);
                t4_swcq_consume(cq);
        } else {
                PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
                     __func__, cq, cq->cqid, cq->cidx);
                t4_hwcq_consume(cq);
        }
        return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *      0                   cqe returned
 *      -ENODATA            EMPTY;
 *      -EAGAIN             caller must try again
 *      any other -errno    fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
        struct c4iw_qp *qhp = NULL;
        struct t4_cqe uninitialized_var(cqe), *rd_cqe;
        struct t4_wq *wq;
        u32 credit = 0;
        u8 cqe_flushed;
        u64 cookie = 0;
        int ret;

        ret = t4_next_cqe(&chp->cq, &rd_cqe);

        if (ret)
                return ret;

        qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
        if (!qhp)
                wq = NULL;
        else {
                spin_lock(&qhp->lock);
                wq = &(qhp->wq);
        }
        ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
        if (ret)
                goto out;

        wc->wr_id = cookie;
        wc->qp = &qhp->ibqp;
        wc->vendor_err = CQE_STATUS(&cqe);
        wc->wc_flags = 0;

        PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
             "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
             CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
             CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

        if (CQE_TYPE(&cqe) == 0) {
                if (!CQE_STATUS(&cqe))
                        wc->byte_len = CQE_LEN(&cqe);
                else
                        wc->byte_len = 0;
                wc->opcode = IB_WC_RECV;
                if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
                    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
                        wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                }
        } else {
                switch (CQE_OPCODE(&cqe)) {
                case FW_RI_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case FW_RI_READ_REQ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = CQE_LEN(&cqe);
                        break;
                case FW_RI_SEND_WITH_INV:
                case FW_RI_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_SEND;
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        break;
                case FW_RI_SEND:
                case FW_RI_SEND_WITH_SE:
                        wc->opcode = IB_WC_SEND;
                        break;
                case FW_RI_BIND_MW:
                        wc->opcode = IB_WC_BIND_MW;
                        break;
                case FW_RI_LOCAL_INV:
                        wc->opcode = IB_WC_LOCAL_INV;
                        break;
                case FW_RI_FAST_REGISTER:
                        wc->opcode = IB_WC_FAST_REG_MR;
                        break;
                default:
                        printk(KERN_ERR MOD "Unexpected opcode %d "
                               "in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                        goto out;
                }
        }

        if (cqe_flushed)
                wc->status = IB_WC_WR_FLUSH_ERR;
        else {
                switch (CQE_STATUS(&cqe)) {
                case T4_ERR_SUCCESS:
                        wc->status = IB_WC_SUCCESS;
                        break;
                case T4_ERR_STAG:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_PDID:
                        wc->status = IB_WC_LOC_PROT_ERR;
                        break;
                case T4_ERR_QPID:
                case T4_ERR_ACCESS:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_WRAP:
                        wc->status = IB_WC_GENERAL_ERR;
                        break;
                case T4_ERR_BOUND:
                        wc->status = IB_WC_LOC_LEN_ERR;
                        break;
                case T4_ERR_INVALIDATE_SHARED_MR:
                case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                        wc->status = IB_WC_MW_BIND_ERR;
                        break;
                case T4_ERR_CRC:
                case T4_ERR_MARKER:
                case T4_ERR_PDU_LEN_ERR:
                case T4_ERR_OUT_OF_RQE:
                case T4_ERR_DDP_VERSION:
                case T4_ERR_RDMA_VERSION:
                case T4_ERR_DDP_QUEUE_NUM:
                case T4_ERR_MSN:
                case T4_ERR_TBIT:
                case T4_ERR_MO:
                case T4_ERR_MSN_RANGE:
                case T4_ERR_IRD_OVERFLOW:
                case T4_ERR_OPCODE:
                case T4_ERR_INTERNAL_ERR:
                        wc->status = IB_WC_FATAL_ERR;
                        break;
                case T4_ERR_SWFLUSH:
                        wc->status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        printk(KERN_ERR MOD
                               "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
                               CQE_STATUS(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                }
        }
out:
        if (wq)
                spin_unlock(&qhp->lock);
        return ret;
}

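/*
 * ib_poll_cq entry point.  poll_cq() returns -EAGAIN for CQEs that
 * must be skipped (no QP, TERMINATEs, unsignaled reads), so each WC
 * slot is retried until a real completion or an empty CQ (-ENODATA)
 * is seen.
 */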
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct c4iw_cq *chp;
        unsigned long flags;
        int npolled;
        int err = 0;

        chp = to_c4iw_cq(ibcq);

        spin_lock_irqsave(&chp->lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
                do {
                        err = c4iw_poll_cq_one(chp, wc + npolled);
                } while (err == -EAGAIN);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&chp->lock, flags);
        return !err || err == -ENODATA ? npolled : err;
}

int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
        struct c4iw_cq *chp;
        struct c4iw_ucontext *ucontext;

        PDBG("%s ib_cq %p\n", __func__, ib_cq);
        chp = to_c4iw_cq(ib_cq);

        remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
        atomic_dec(&chp->refcnt);
        wait_event(chp->wait, !atomic_read(&chp->refcnt));

        ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
                                  : NULL;
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
        kfree(chp);
        return 0;
}

struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
                             int vector, struct ib_ucontext *ib_context,
                             struct ib_udata *udata)
{
        struct c4iw_dev *rhp;
        struct c4iw_cq *chp;
        struct c4iw_create_cq_resp uresp;
        struct c4iw_ucontext *ucontext = NULL;
        int ret;
        size_t memsize, hwentries;
        struct c4iw_mm_entry *mm, *mm2;

        PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);

        rhp = to_c4iw_dev(ibdev);

        if (vector >= rhp->rdev.lldi.nciq)
                return ERR_PTR(-EINVAL);

        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return ERR_PTR(-ENOMEM);

        if (ib_context)
                ucontext = to_c4iw_ucontext(ib_context);

        /* account for the status page. */
        entries++;

        /* IQ needs one extra entry to differentiate full vs empty. */
        entries++;

        /*
         * entries must be a multiple of 16 for HW.
         */
        entries = roundup(entries, 16);

        /*
         * Make the actual HW queue 2x to avoid cidx_inc overflows.
         */
        hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

        /*
         * Make HW queue at least 64 entries so GTS updates aren't too
         * frequent.
         */
        if (hwentries < 64)
                hwentries = 64;

        memsize = hwentries * sizeof *chp->cq.queue;

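        /*
         * Worked example of the sizing above: a request for 62 CQEs
         * becomes 64 after the status-page and full/empty entries plus
         * roundup(..., 16), then doubles to a 128-entry HW queue,
         * which already clears the 64-entry floor.
         */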
        /*
         * memsize must be a multiple of the page size if it's a user cq.
         */
        if (ucontext)
                memsize = roundup(memsize, PAGE_SIZE);
        chp->cq.size = hwentries;
        chp->cq.memsize = memsize;
        chp->cq.vector = vector;

        ret = create_cq(&rhp->rdev, &chp->cq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
        if (ret)
                goto err1;

        chp->rhp = rhp;
        chp->cq.size--;                         /* status page */
        chp->ibcq.cqe = entries - 2;
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
        if (ret)
                goto err2;

        if (ucontext) {
                mm = kmalloc(sizeof *mm, GFP_KERNEL);
                if (!mm) {
                        ret = -ENOMEM;
                        goto err3;
                }
                mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                if (!mm2) {
                        ret = -ENOMEM;
                        goto err4;
                }

                uresp.qid_mask = rhp->rdev.cqmask;
                uresp.cqid = chp->cq.cqid;
                uresp.size = chp->cq.size;
                uresp.memsize = chp->cq.memsize;
                spin_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.gts_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
                ret = ib_copy_to_udata(udata, &uresp,
                                       sizeof(uresp) - sizeof(uresp.reserved));
                if (ret)
                        goto err5;

                mm->key = uresp.key;
                mm->addr = virt_to_phys(chp->cq.queue);
                mm->len = chp->cq.memsize;
                insert_mmap(ucontext, mm);

                mm2->key = uresp.gts_key;
                mm2->addr = chp->cq.ugts;
                mm2->len = PAGE_SIZE;
                insert_mmap(ucontext, mm2);
        }
        PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
             __func__, chp->cq.cqid, chp, chp->cq.size,
             chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
        return &chp->ibcq;
err5:
        kfree(mm2);
err4:
        kfree(mm);
err3:
        remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
        kfree(chp);
        return ERR_PTR(ret);
}

int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
        return -ENOSYS;
}

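/*
 * Arm the CQ for the next notification (solicited-only when
 * IB_CQ_SOLICITED is requested).  t4_arm_cq() returns non-zero if
 * CQEs are already pending; that is only reported to the caller when
 * IB_CQ_REPORT_MISSED_EVENTS was asked for.
 */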
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct c4iw_cq *chp;
        int ret;
        unsigned long flag;

        chp = to_c4iw_cq(ibcq);
        spin_lock_irqsave(&chp->lock, flag);
        ret = t4_arm_cq(&chp->cq,
                        (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
        spin_unlock_irqrestore(&chp->lock, flag);
        if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
                ret = 0;
        return ret;
}