RDMA/cxgb4: Set fence flag for inv-local-stag work requests
[linux-2.6-block.git] drivers/infiniband/hw/cxgb4/qp.c
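The change below makes c4iw_post_send() honor IB_SEND_FENCE on IB_WR_LOCAL_INV work requests by setting FW_RI_LOCAL_FENCE_FLAG, so a local STag invalidate can be fenced behind outstanding RDMA reads. A minimal sketch of how a kernel ULP might post such a fenced local invalidate is shown here; it is not part of this file, and the 'qp' and 'rkey' identifiers are assumed to be supplied by the caller.

/*
 * Hedged sketch (not part of this file): a fenced local-invalidate WR as a
 * kernel ULP might post it.  'qp' and 'rkey' are assumed to exist in the
 * caller; error handling is reduced to the bare minimum.
 */
#include <linux/string.h>
#include <rdma/ib_verbs.h>

static int post_fenced_local_inv(struct ib_qp *qp, u32 rkey)
{
	struct ib_send_wr wr, *bad_wr;

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_LOCAL_INV;
	/* IB_SEND_FENCE now maps to FW_RI_LOCAL_FENCE_FLAG in c4iw_post_send() */
	wr.send_flags = IB_SEND_FENCE | IB_SEND_SIGNALED;
	wr.ex.invalidate_rkey = rkey;
	wr.wr_id = (u64)rkey;

	return ib_post_send(qp, &wr, &bad_wr);
}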
1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "iw_cxgb4.h"
33
34static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
35 struct c4iw_dev_ucontext *uctx)
36{
37 /*
38 * uP clears EQ contexts when the connection exits rdma mode,
39 * so no need to post a RESET WR for these EQs.
40 */
41 dma_free_coherent(&(rdev->lldi.pdev->dev),
42 wq->rq.memsize, wq->rq.queue,
43 pci_unmap_addr(&wq->rq, mapping));
44 dma_free_coherent(&(rdev->lldi.pdev->dev),
45 wq->sq.memsize, wq->sq.queue,
46 pci_unmap_addr(&wq->sq, mapping));
47 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
48 kfree(wq->rq.sw_rq);
49 kfree(wq->sq.sw_sq);
50 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
51 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
52 return 0;
53}
54
55static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
56 struct t4_cq *rcq, struct t4_cq *scq,
57 struct c4iw_dev_ucontext *uctx)
58{
59 int user = (uctx != &rdev->uctx);
60 struct fw_ri_res_wr *res_wr;
61 struct fw_ri_res *res;
62 int wr_len;
63 struct c4iw_wr_wait wr_wait;
64 struct sk_buff *skb;
65 int ret;
66 int eqsize;
67
68 wq->sq.qid = c4iw_get_qpid(rdev, uctx);
69 if (!wq->sq.qid)
70 return -ENOMEM;
71
72 wq->rq.qid = c4iw_get_qpid(rdev, uctx);
73 if (!wq->rq.qid)
74 goto err1;
75
76 if (!user) {
77 wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
78 GFP_KERNEL);
79 if (!wq->sq.sw_sq)
80 goto err2;
81
82 wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
83 GFP_KERNEL);
84 if (!wq->rq.sw_rq)
85 goto err3;
86 }
87
88 /*
89 * RQT must be a power of 2.
90 */
91 wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
92 wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
93 if (!wq->rq.rqt_hwaddr)
94 goto err4;
95
96 wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
97 wq->sq.memsize, &(wq->sq.dma_addr),
98 GFP_KERNEL);
99 if (!wq->sq.queue)
100 goto err5;
101 memset(wq->sq.queue, 0, wq->sq.memsize);
102 pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
103
104 wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
105 wq->rq.memsize, &(wq->rq.dma_addr),
106 GFP_KERNEL);
107 if (!wq->rq.queue)
108 goto err6;
109 PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
110 __func__, wq->sq.queue,
111 (unsigned long long)virt_to_phys(wq->sq.queue),
112 wq->rq.queue,
113 (unsigned long long)virt_to_phys(wq->rq.queue));
114 memset(wq->rq.queue, 0, wq->rq.memsize);
115 pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
116
117 wq->db = rdev->lldi.db_reg;
118 wq->gts = rdev->lldi.gts_reg;
119 if (user) {
120 wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
121 (wq->sq.qid << rdev->qpshift);
122 wq->sq.udb &= PAGE_MASK;
123 wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
124 (wq->rq.qid << rdev->qpshift);
125 wq->rq.udb &= PAGE_MASK;
126 }
127 wq->rdev = rdev;
128 wq->rq.msn = 1;
129
130 /* build fw_ri_res_wr */
131 wr_len = sizeof *res_wr + 2 * sizeof *res;
132
133 skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
134 if (!skb) {
135 ret = -ENOMEM;
136 goto err7;
137 }
138 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
139
140 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
141 memset(res_wr, 0, wr_len);
142 res_wr->op_nres = cpu_to_be32(
143 FW_WR_OP(FW_RI_RES_WR) |
144 V_FW_RI_RES_WR_NRES(2) |
145 FW_WR_COMPL(1));
146 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
147 res_wr->cookie = (u64)&wr_wait;
148 res = res_wr->res;
149 res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
150 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
151
152 /*
153 * eqsize is the number of 64B entries plus the status page size.
154 */
155 eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
156
157 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
158 V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
159 V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
160 V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
161 V_FW_RI_RES_WR_IQID(scq->cqid));
162 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
163 V_FW_RI_RES_WR_DCAEN(0) |
164 V_FW_RI_RES_WR_DCACPU(0) |
165 V_FW_RI_RES_WR_FBMIN(3) |
166 V_FW_RI_RES_WR_FBMAX(3) |
167 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
168 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
169 V_FW_RI_RES_WR_EQSIZE(eqsize));
170 res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
171 res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
172 res++;
173 res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
174 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
175
176 /*
177 * eqsize is the number of 64B entries plus the status page size.
178 */
179 eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
180 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
181 V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
182 V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
183 V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
184 V_FW_RI_RES_WR_IQID(rcq->cqid));
185 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
186 V_FW_RI_RES_WR_DCAEN(0) |
187 V_FW_RI_RES_WR_DCACPU(0) |
188 V_FW_RI_RES_WR_FBMIN(3) |
189 V_FW_RI_RES_WR_FBMAX(3) |
190 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
191 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
192 V_FW_RI_RES_WR_EQSIZE(eqsize));
193 res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
194 res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
195
196 c4iw_init_wr_wait(&wr_wait);
197
198 ret = c4iw_ofld_send(rdev, skb);
199 if (ret)
200 goto err7;
201 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
202 if (!wr_wait.done) {
203 printk(KERN_ERR MOD "Device %s not responding!\n",
204 pci_name(rdev->lldi.pdev));
205 rdev->flags = T4_FATAL_ERROR;
206 ret = -EIO;
207 } else
208 ret = wr_wait.ret;
209 if (ret)
210 goto err7;
211
212 PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
213 __func__, wq->sq.qid, wq->rq.qid, wq->db,
214 (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
215
216 return 0;
217err7:
218 dma_free_coherent(&(rdev->lldi.pdev->dev),
219 wq->rq.memsize, wq->rq.queue,
220 pci_unmap_addr(&wq->rq, mapping));
221err6:
222 dma_free_coherent(&(rdev->lldi.pdev->dev),
223 wq->sq.memsize, wq->sq.queue,
224 pci_unmap_addr(&wq->sq, mapping));
225err5:
226 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
227err4:
228 kfree(wq->rq.sw_rq);
229err3:
230 kfree(wq->sq.sw_sq);
231err2:
232 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
233err1:
234 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
235 return -ENOMEM;
236}
237
238static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
239{
240 int i;
241 u32 plen;
242 int size;
243 u8 *datap;
244
245 if (wr->num_sge > T4_MAX_SEND_SGE)
246 return -EINVAL;
247 switch (wr->opcode) {
248 case IB_WR_SEND:
249 if (wr->send_flags & IB_SEND_SOLICITED)
250 wqe->send.sendop_pkd = cpu_to_be32(
251 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
252 else
253 wqe->send.sendop_pkd = cpu_to_be32(
254 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
255 wqe->send.stag_inv = 0;
256 break;
257 case IB_WR_SEND_WITH_INV:
258 if (wr->send_flags & IB_SEND_SOLICITED)
259 wqe->send.sendop_pkd = cpu_to_be32(
260 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
261 else
262 wqe->send.sendop_pkd = cpu_to_be32(
263 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
264 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
265 break;
266
267 default:
268 return -EINVAL;
269 }
270 plen = 0;
271 if (wr->num_sge) {
272 if (wr->send_flags & IB_SEND_INLINE) {
273 datap = (u8 *)wqe->send.u.immd_src[0].data;
274 for (i = 0; i < wr->num_sge; i++) {
275 if ((plen + wr->sg_list[i].length) >
276 T4_MAX_SEND_INLINE) {
277 return -EMSGSIZE;
278 }
279 plen += wr->sg_list[i].length;
280 memcpy(datap,
281 (void *)(unsigned long)wr->sg_list[i].addr,
282 wr->sg_list[i].length);
283 datap += wr->sg_list[i].length;
284 }
285 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
286 wqe->send.u.immd_src[0].r1 = 0;
287 wqe->send.u.immd_src[0].r2 = 0;
288 wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
289 size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
290 plen;
291 } else {
292 for (i = 0; i < wr->num_sge; i++) {
293 if ((plen + wr->sg_list[i].length) < plen)
294 return -EMSGSIZE;
295 plen += wr->sg_list[i].length;
296 wqe->send.u.isgl_src[0].sge[i].stag =
297 cpu_to_be32(wr->sg_list[i].lkey);
298 wqe->send.u.isgl_src[0].sge[i].len =
299 cpu_to_be32(wr->sg_list[i].length);
300 wqe->send.u.isgl_src[0].sge[i].to =
301 cpu_to_be64(wr->sg_list[i].addr);
302 }
303 wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
304 wqe->send.u.isgl_src[0].r1 = 0;
305 wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
306 wqe->send.u.isgl_src[0].r2 = 0;
307 size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
308 wr->num_sge * sizeof(struct fw_ri_sge);
309 }
310 } else {
311 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
312 wqe->send.u.immd_src[0].r1 = 0;
313 wqe->send.u.immd_src[0].r2 = 0;
314 wqe->send.u.immd_src[0].immdlen = 0;
315 size = sizeof wqe->send + sizeof(struct fw_ri_immd);
316 }
317 *len16 = DIV_ROUND_UP(size, 16);
318 wqe->send.plen = cpu_to_be32(plen);
319 return 0;
320}
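build_rdma_send() above handles two payload forms: with IB_SEND_INLINE the SGE addresses are treated as kernel virtual addresses and copied straight into the WQE (capped at T4_MAX_SEND_INLINE), otherwise an FW_RI_DATA_ISGL gather list is built. A hedged caller-side sketch of the inline case, with 'qp', 'buf' and 'len' assumed to exist:

static int post_small_inline_send(struct ib_qp *qp, void *buf, u32 len)
{
	struct ib_sge sge;
	struct ib_send_wr wr, *bad_wr;

	sge.addr = (u64)(unsigned long)buf;	/* kernel VA, memcpy'd by the driver */
	sge.length = len;			/* total must fit T4_MAX_SEND_INLINE */
	sge.lkey = 0;				/* not used for the inline copy */

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_INLINE | IB_SEND_SIGNALED;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	return ib_post_send(qp, &wr, &bad_wr);
}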
321
322static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
323{
324 int i;
325 u32 plen;
326 int size;
327 u8 *datap;
328
329 if (wr->num_sge > T4_MAX_WRITE_SGE)
330 return -EINVAL;
331 wqe->write.r2 = 0;
332 wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
333 wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
334 plen = 0;
335 if (wr->num_sge) {
336 if (wr->send_flags & IB_SEND_INLINE) {
337 datap = (u8 *)wqe->write.u.immd_src[0].data;
338 for (i = 0; i < wr->num_sge; i++) {
339 if ((plen + wr->sg_list[i].length) >
340 T4_MAX_WRITE_INLINE) {
341 return -EMSGSIZE;
342 }
343 plen += wr->sg_list[i].length;
344 memcpy(datap,
345 (void *)(unsigned long)wr->sg_list[i].addr,
346 wr->sg_list[i].length);
347 datap += wr->sg_list[i].length;
348 }
349 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
350 wqe->write.u.immd_src[0].r1 = 0;
351 wqe->write.u.immd_src[0].r2 = 0;
352 wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
353 size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
354 plen;
355 } else {
356 for (i = 0; i < wr->num_sge; i++) {
357 if ((plen + wr->sg_list[i].length) < plen)
358 return -EMSGSIZE;
359 plen += wr->sg_list[i].length;
360 wqe->write.u.isgl_src[0].sge[i].stag =
361 cpu_to_be32(wr->sg_list[i].lkey);
362 wqe->write.u.isgl_src[0].sge[i].len =
363 cpu_to_be32(wr->sg_list[i].length);
364 wqe->write.u.isgl_src[0].sge[i].to =
365 cpu_to_be64(wr->sg_list[i].addr);
366 }
367 wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
368 wqe->write.u.isgl_src[0].r1 = 0;
369 wqe->write.u.isgl_src[0].nsge =
370 cpu_to_be16(wr->num_sge);
371 wqe->write.u.isgl_src[0].r2 = 0;
372 size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
373 wr->num_sge * sizeof(struct fw_ri_sge);
374 }
375 } else {
376 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
377 wqe->write.u.immd_src[0].r1 = 0;
378 wqe->write.u.immd_src[0].r2 = 0;
379 wqe->write.u.immd_src[0].immdlen = 0;
380 size = sizeof wqe->write + sizeof(struct fw_ri_immd);
381 }
382 *len16 = DIV_ROUND_UP(size, 16);
383 wqe->write.plen = cpu_to_be32(plen);
384 return 0;
385}
386
387static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
388{
389 if (wr->num_sge > 1)
390 return -EINVAL;
391 if (wr->num_sge) {
392 wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
393 wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
394 >> 32));
395 wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
396 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
397 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
398 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
399 >> 32));
400 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
401 } else {
402 wqe->read.stag_src = cpu_to_be32(2);
403 wqe->read.to_src_hi = 0;
404 wqe->read.to_src_lo = 0;
405 wqe->read.stag_sink = cpu_to_be32(2);
406 wqe->read.plen = 0;
407 wqe->read.to_sink_hi = 0;
408 wqe->read.to_sink_lo = 0;
409 }
410 wqe->read.r2 = 0;
411 wqe->read.r5 = 0;
412 *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
413 return 0;
414}
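build_rdma_read() accepts at most one SGE, and a zero-SGE request is turned into a harmless zero-length read. A hedged sketch of the single-SGE caller, with 'qp', the local sink SGE and the remote rkey/address assumed to be supplied:

static int post_rdma_read(struct ib_qp *qp, struct ib_sge *sink,
			  u32 rkey, u64 remote_addr)
{
	struct ib_send_wr wr, *bad_wr;

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_RDMA_READ;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.sg_list = sink;
	wr.num_sge = 1;			/* more than one returns -EINVAL */
	wr.wr.rdma.rkey = rkey;
	wr.wr.rdma.remote_addr = remote_addr;

	return ib_post_send(qp, &wr, &bad_wr);
}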
415
416static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
417 struct ib_recv_wr *wr, u8 *len16)
418{
419 int i;
420 int plen = 0;
421
422 for (i = 0; i < wr->num_sge; i++) {
423 if ((plen + wr->sg_list[i].length) < plen)
424 return -EMSGSIZE;
425 plen += wr->sg_list[i].length;
426 wqe->recv.isgl.sge[i].stag =
427 cpu_to_be32(wr->sg_list[i].lkey);
428 wqe->recv.isgl.sge[i].len =
429 cpu_to_be32(wr->sg_list[i].length);
430 wqe->recv.isgl.sge[i].to =
431 cpu_to_be64(wr->sg_list[i].addr);
432 }
433 for (; i < T4_MAX_RECV_SGE; i++) {
434 wqe->recv.isgl.sge[i].stag = 0;
435 wqe->recv.isgl.sge[i].len = 0;
436 wqe->recv.isgl.sge[i].to = 0;
437 }
438 wqe->recv.isgl.op = FW_RI_DATA_ISGL;
439 wqe->recv.isgl.r1 = 0;
440 wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
441 wqe->recv.isgl.r2 = 0;
442 *len16 = DIV_ROUND_UP(sizeof wqe->recv +
443 wr->num_sge * sizeof(struct fw_ri_sge), 16);
444 return 0;
445}
446
447static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
448{
449
450 struct fw_ri_immd *imdp;
451 __be64 *p;
452 int i;
453 int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
454
455 if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
456 return -EINVAL;
457
458 wqe->fr.qpbinde_to_dcacpu = 0;
459 wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
460 wqe->fr.addr_type = FW_RI_VA_BASED_TO;
461 wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
462 wqe->fr.len_hi = 0;
463 wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
464 wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
465 wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
466 wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
467 0xffffffff);
468 if (pbllen > T4_MAX_FR_IMMD) {
469 struct c4iw_fr_page_list *c4pl =
470 to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
471 struct fw_ri_dsgl *sglp;
472
473 sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
474 sglp->op = FW_RI_DATA_DSGL;
475 sglp->r1 = 0;
476 sglp->nsge = cpu_to_be16(1);
477 sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
478 sglp->len0 = cpu_to_be32(pbllen);
479
480 *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
481 } else {
482 imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
483 imdp->op = FW_RI_DATA_IMMD;
484 imdp->r1 = 0;
485 imdp->r2 = 0;
486 imdp->immdlen = cpu_to_be32(pbllen);
487 p = (__be64 *)(imdp + 1);
488 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
489 *p = cpu_to_be64(
490 (u64)wr->wr.fast_reg.page_list->page_list[i]);
491 *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
492 16);
493 }
494 return 0;
495}
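build_fastreg() turns an IB_WR_FAST_REG_MR request into an FW_RI_FR_NSMR_WR, carrying the PBL either as immediate data or, above T4_MAX_FR_IMMD, as a DSGL pointing at the page list's DMA buffer. A hedged sketch of a caller using the fast-register verbs of this kernel era; 'pd', 'qp', the DMA page addresses and 'npages' are assumed to exist, and completion/invalidate handling is omitted:

static int fastreg_pages(struct ib_pd *pd, struct ib_qp *qp,
			 u64 *dma_pages, int npages, u64 iova, u32 len)
{
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *pl;
	struct ib_send_wr wr, *bad_wr;
	int i, ret;

	mr = ib_alloc_fast_reg_mr(pd, npages);
	if (IS_ERR(mr))
		return PTR_ERR(mr);
	pl = ib_alloc_fast_reg_page_list(qp->device, npages);
	if (IS_ERR(pl)) {
		ib_dereg_mr(mr);
		return PTR_ERR(pl);
	}
	for (i = 0; i < npages; i++)
		pl->page_list[i] = dma_pages[i];

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_FAST_REG_MR;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.wr.fast_reg.iova_start = iova;
	wr.wr.fast_reg.page_list = pl;
	wr.wr.fast_reg.page_list_len = npages;	/* bounded by T4_MAX_FR_DEPTH */
	wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	wr.wr.fast_reg.length = len;
	wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
				      IB_ACCESS_REMOTE_READ;
	wr.wr.fast_reg.rkey = mr->rkey;

	ret = ib_post_send(qp, &wr, &bad_wr);
	/* mr and pl must stay allocated until the WR completes */
	return ret;
}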
496
497static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
498 u8 *len16)
499{
500 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
501 wqe->inv.r2 = 0;
502 *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
503 return 0;
504}
505
506void c4iw_qp_add_ref(struct ib_qp *qp)
507{
508 PDBG("%s ib_qp %p\n", __func__, qp);
509 atomic_inc(&(to_c4iw_qp(qp)->refcnt));
510}
511
512void c4iw_qp_rem_ref(struct ib_qp *qp)
513{
514 PDBG("%s ib_qp %p\n", __func__, qp);
515 if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
516 wake_up(&(to_c4iw_qp(qp)->wait));
517}
518
519int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
520 struct ib_send_wr **bad_wr)
521{
522 int err = 0;
523 u8 len16 = 0;
524 enum fw_wr_opcodes fw_opcode = 0;
525 enum fw_ri_wr_flags fw_flags;
526 struct c4iw_qp *qhp;
527 union t4_wr *wqe;
528 u32 num_wrs;
529 struct t4_swsqe *swsqe;
530 unsigned long flag;
531 u16 idx = 0;
532
533 qhp = to_c4iw_qp(ibqp);
534 spin_lock_irqsave(&qhp->lock, flag);
535 if (t4_wq_in_error(&qhp->wq)) {
536 spin_unlock_irqrestore(&qhp->lock, flag);
537 return -EINVAL;
538 }
539 num_wrs = t4_sq_avail(&qhp->wq);
540 if (num_wrs == 0) {
541 spin_unlock_irqrestore(&qhp->lock, flag);
542 return -ENOMEM;
543 }
544 while (wr) {
545 if (num_wrs == 0) {
546 err = -ENOMEM;
547 *bad_wr = wr;
548 break;
549 }
550 wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
551 fw_flags = 0;
552 if (wr->send_flags & IB_SEND_SOLICITED)
553 fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
554 if (wr->send_flags & IB_SEND_SIGNALED)
555 fw_flags |= FW_RI_COMPLETION_FLAG;
556 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
557 switch (wr->opcode) {
558 case IB_WR_SEND_WITH_INV:
559 case IB_WR_SEND:
560 if (wr->send_flags & IB_SEND_FENCE)
561 fw_flags |= FW_RI_READ_FENCE_FLAG;
562 fw_opcode = FW_RI_SEND_WR;
563 if (wr->opcode == IB_WR_SEND)
564 swsqe->opcode = FW_RI_SEND;
565 else
566 swsqe->opcode = FW_RI_SEND_WITH_INV;
567 err = build_rdma_send(wqe, wr, &len16);
568 break;
569 case IB_WR_RDMA_WRITE:
570 fw_opcode = FW_RI_RDMA_WRITE_WR;
571 swsqe->opcode = FW_RI_RDMA_WRITE;
572 err = build_rdma_write(wqe, wr, &len16);
573 break;
574 case IB_WR_RDMA_READ:
575 fw_opcode = FW_RI_RDMA_READ_WR;
576 swsqe->opcode = FW_RI_READ_REQ;
577 fw_flags = 0;
578 err = build_rdma_read(wqe, wr, &len16);
579 if (err)
580 break;
581 swsqe->read_len = wr->sg_list[0].length;
582 if (!qhp->wq.sq.oldest_read)
583 qhp->wq.sq.oldest_read = swsqe;
584 break;
585 case IB_WR_FAST_REG_MR:
586 fw_opcode = FW_RI_FR_NSMR_WR;
587 swsqe->opcode = FW_RI_FAST_REGISTER;
588 err = build_fastreg(wqe, wr, &len16);
589 break;
590 case IB_WR_LOCAL_INV:
591 if (wr->send_flags & IB_SEND_FENCE)
592 fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
593 fw_opcode = FW_RI_INV_LSTAG_WR;
594 swsqe->opcode = FW_RI_LOCAL_INV;
595 err = build_inv_stag(wqe, wr, &len16);
596 break;
597 default:
598 PDBG("%s post of type=%d TBD!\n", __func__,
599 wr->opcode);
600 err = -EINVAL;
601 }
602 if (err) {
603 *bad_wr = wr;
604 break;
605 }
606 swsqe->idx = qhp->wq.sq.pidx;
607 swsqe->complete = 0;
608 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
609 swsqe->wr_id = wr->wr_id;
610
611 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
612
613 PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
614 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
615 swsqe->opcode, swsqe->read_len);
616 wr = wr->next;
617 num_wrs--;
618 t4_sq_produce(&qhp->wq);
619 idx++;
620 }
621 if (t4_wq_db_enabled(&qhp->wq))
622 t4_ring_sq_db(&qhp->wq, idx);
623 spin_unlock_irqrestore(&qhp->lock, flag);
624 return err;
625}
626
627int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
628 struct ib_recv_wr **bad_wr)
629{
630 int err = 0;
631 struct c4iw_qp *qhp;
632 union t4_recv_wr *wqe;
633 u32 num_wrs;
634 u8 len16 = 0;
635 unsigned long flag;
636 u16 idx = 0;
637
638 qhp = to_c4iw_qp(ibqp);
639 spin_lock_irqsave(&qhp->lock, flag);
640 if (t4_wq_in_error(&qhp->wq)) {
641 spin_unlock_irqrestore(&qhp->lock, flag);
642 return -EINVAL;
643 }
644 num_wrs = t4_rq_avail(&qhp->wq);
645 if (num_wrs == 0) {
646 spin_unlock_irqrestore(&qhp->lock, flag);
647 return -ENOMEM;
648 }
649 while (wr) {
650 if (wr->num_sge > T4_MAX_RECV_SGE) {
651 err = -EINVAL;
652 *bad_wr = wr;
653 break;
654 }
655 wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
656 if (num_wrs)
657 err = build_rdma_recv(qhp, wqe, wr, &len16);
658 else
659 err = -ENOMEM;
660 if (err) {
661 *bad_wr = wr;
662 break;
663 }
664
665 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
666
667 wqe->recv.opcode = FW_RI_RECV_WR;
668 wqe->recv.r1 = 0;
669 wqe->recv.wrid = qhp->wq.rq.pidx;
670 wqe->recv.r2[0] = 0;
671 wqe->recv.r2[1] = 0;
672 wqe->recv.r2[2] = 0;
673 wqe->recv.len16 = len16;
674 if (len16 < 5)
675 wqe->flits[8] = 0;
676
677 PDBG("%s cookie 0x%llx pidx %u\n", __func__,
678 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
679 t4_rq_produce(&qhp->wq);
680 wr = wr->next;
681 num_wrs--;
682 idx++;
683 }
684 if (t4_wq_db_enabled(&qhp->wq))
685 t4_ring_rq_db(&qhp->wq, idx);
686 spin_unlock_irqrestore(&qhp->lock, flag);
687 return err;
688}
689
690int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
691{
692 return -ENOSYS;
693}
694
695static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
696 u8 *ecode)
697{
698 int status;
699 int tagged;
700 int opcode;
701 int rqtype;
702 int send_inv;
703
704 if (!err_cqe) {
705 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
706 *ecode = 0;
707 return;
708 }
709
710 status = CQE_STATUS(err_cqe);
711 opcode = CQE_OPCODE(err_cqe);
712 rqtype = RQ_TYPE(err_cqe);
713 send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
714 (opcode == FW_RI_SEND_WITH_SE_INV);
715 tagged = (opcode == FW_RI_RDMA_WRITE) ||
716 (rqtype && (opcode == FW_RI_READ_RESP));
717
718 switch (status) {
719 case T4_ERR_STAG:
720 if (send_inv) {
721 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
722 *ecode = RDMAP_CANT_INV_STAG;
723 } else {
724 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
725 *ecode = RDMAP_INV_STAG;
726 }
727 break;
728 case T4_ERR_PDID:
729 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
730 if ((opcode == FW_RI_SEND_WITH_INV) ||
731 (opcode == FW_RI_SEND_WITH_SE_INV))
732 *ecode = RDMAP_CANT_INV_STAG;
733 else
734 *ecode = RDMAP_STAG_NOT_ASSOC;
735 break;
736 case T4_ERR_QPID:
737 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
738 *ecode = RDMAP_STAG_NOT_ASSOC;
739 break;
740 case T4_ERR_ACCESS:
741 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
742 *ecode = RDMAP_ACC_VIOL;
743 break;
744 case T4_ERR_WRAP:
745 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
746 *ecode = RDMAP_TO_WRAP;
747 break;
748 case T4_ERR_BOUND:
749 if (tagged) {
750 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
751 *ecode = DDPT_BASE_BOUNDS;
752 } else {
753 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
754 *ecode = RDMAP_BASE_BOUNDS;
755 }
756 break;
757 case T4_ERR_INVALIDATE_SHARED_MR:
758 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
759 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
760 *ecode = RDMAP_CANT_INV_STAG;
761 break;
762 case T4_ERR_ECC:
763 case T4_ERR_ECC_PSTAG:
764 case T4_ERR_INTERNAL_ERR:
765 *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
766 *ecode = 0;
767 break;
768 case T4_ERR_OUT_OF_RQE:
769 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
770 *ecode = DDPU_INV_MSN_NOBUF;
771 break;
772 case T4_ERR_PBL_ADDR_BOUND:
773 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
774 *ecode = DDPT_BASE_BOUNDS;
775 break;
776 case T4_ERR_CRC:
777 *layer_type = LAYER_MPA|DDP_LLP;
778 *ecode = MPA_CRC_ERR;
779 break;
780 case T4_ERR_MARKER:
781 *layer_type = LAYER_MPA|DDP_LLP;
782 *ecode = MPA_MARKER_ERR;
783 break;
784 case T4_ERR_PDU_LEN_ERR:
785 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
786 *ecode = DDPU_MSG_TOOBIG;
787 break;
788 case T4_ERR_DDP_VERSION:
789 if (tagged) {
790 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
791 *ecode = DDPT_INV_VERS;
792 } else {
793 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
794 *ecode = DDPU_INV_VERS;
795 }
796 break;
797 case T4_ERR_RDMA_VERSION:
798 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
799 *ecode = RDMAP_INV_VERS;
800 break;
801 case T4_ERR_OPCODE:
802 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
803 *ecode = RDMAP_INV_OPCODE;
804 break;
805 case T4_ERR_DDP_QUEUE_NUM:
806 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
807 *ecode = DDPU_INV_QN;
808 break;
809 case T4_ERR_MSN:
810 case T4_ERR_MSN_GAP:
811 case T4_ERR_MSN_RANGE:
812 case T4_ERR_IRD_OVERFLOW:
813 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
814 *ecode = DDPU_INV_MSN_RANGE;
815 break;
816 case T4_ERR_TBIT:
817 *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
818 *ecode = 0;
819 break;
820 case T4_ERR_MO:
821 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
822 *ecode = DDPU_INV_MO;
823 break;
824 default:
825 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
826 *ecode = 0;
827 break;
828 }
829}
830
831int c4iw_post_zb_read(struct c4iw_qp *qhp)
832{
833 union t4_wr *wqe;
834 struct sk_buff *skb;
835 u8 len16;
836
837 PDBG("%s enter\n", __func__);
838 skb = alloc_skb(40, GFP_KERNEL);
839 if (!skb) {
840 printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
841 return -ENOMEM;
842 }
843 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
844
845 wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
846 memset(wqe, 0, sizeof wqe->read);
847 wqe->read.r2 = cpu_to_be64(0);
848 wqe->read.stag_sink = cpu_to_be32(1);
849 wqe->read.to_sink_hi = cpu_to_be32(0);
850 wqe->read.to_sink_lo = cpu_to_be32(1);
851 wqe->read.stag_src = cpu_to_be32(1);
852 wqe->read.plen = cpu_to_be32(0);
853 wqe->read.to_src_hi = cpu_to_be32(0);
854 wqe->read.to_src_lo = cpu_to_be32(1);
855 len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
856 init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
857
858 return c4iw_ofld_send(&qhp->rhp->rdev, skb);
859}
860
861static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
862 gfp_t gfp)
863{
864 struct fw_ri_wr *wqe;
865 struct sk_buff *skb;
866 struct terminate_message *term;
867
868 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
869 qhp->ep->hwtid);
870
 871 skb = alloc_skb(sizeof *wqe, gfp);
 872 if (!skb)
 873 return;
874 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
875
876 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
877 memset(wqe, 0, sizeof *wqe);
878 wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
879 wqe->flowid_len16 = cpu_to_be32(
880 FW_WR_FLOWID(qhp->ep->hwtid) |
881 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
882
883 wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
884 wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
885 term = (struct terminate_message *)wqe->u.terminate.termmsg;
886 build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
 887 c4iw_ofld_send(&qhp->rhp->rdev, skb);
888}
889
890/*
891 * Assumes qhp lock is held.
892 */
893static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
894 struct c4iw_cq *schp, unsigned long *flag)
895{
896 int count;
897 int flushed;
898
899 PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
900 /* take a ref on the qhp since we must release the lock */
901 atomic_inc(&qhp->refcnt);
902 spin_unlock_irqrestore(&qhp->lock, *flag);
903
 904 /* locking hierarchy: cq lock first, then qp lock. */
905 spin_lock_irqsave(&rchp->lock, *flag);
906 spin_lock(&qhp->lock);
907 c4iw_flush_hw_cq(&rchp->cq);
908 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
909 flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
910 spin_unlock(&qhp->lock);
911 spin_unlock_irqrestore(&rchp->lock, *flag);
912 if (flushed)
913 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
914
 915 /* locking hierarchy: cq lock first, then qp lock. */
916 spin_lock_irqsave(&schp->lock, *flag);
917 spin_lock(&qhp->lock);
918 c4iw_flush_hw_cq(&schp->cq);
919 c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
920 flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
921 spin_unlock(&qhp->lock);
922 spin_unlock_irqrestore(&schp->lock, *flag);
923 if (flushed)
924 (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
925
926 /* deref */
927 if (atomic_dec_and_test(&qhp->refcnt))
928 wake_up(&qhp->wait);
929
930 spin_lock_irqsave(&qhp->lock, *flag);
931}
932
933static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
934{
935 struct c4iw_cq *rchp, *schp;
936
937 rchp = get_chp(qhp->rhp, qhp->attr.rcq);
938 schp = get_chp(qhp->rhp, qhp->attr.scq);
939
940 if (qhp->ibqp.uobject) {
941 t4_set_wq_in_error(&qhp->wq);
942 t4_set_cq_in_error(&rchp->cq);
943 if (schp != rchp)
944 t4_set_cq_in_error(&schp->cq);
945 return;
946 }
947 __flush_qp(qhp, rchp, schp, flag);
948}
949
950static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
951{
952 struct fw_ri_wr *wqe;
953 int ret;
954 struct c4iw_wr_wait wr_wait;
955 struct sk_buff *skb;
956
957 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
958 qhp->ep->hwtid);
959
960 skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
961 if (!skb)
962 return -ENOMEM;
963 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
964
965 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
966 memset(wqe, 0, sizeof *wqe);
967 wqe->op_compl = cpu_to_be32(
968 FW_WR_OP(FW_RI_INIT_WR) |
969 FW_WR_COMPL(1));
970 wqe->flowid_len16 = cpu_to_be32(
971 FW_WR_FLOWID(qhp->ep->hwtid) |
972 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
973 wqe->cookie = (u64)&wr_wait;
974
975 wqe->u.fini.type = FW_RI_TYPE_FINI;
976 c4iw_init_wr_wait(&wr_wait);
977 ret = c4iw_ofld_send(&rhp->rdev, skb);
978 if (ret)
979 goto out;
980
981 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
982 if (!wr_wait.done) {
983 printk(KERN_ERR MOD "Device %s not responding!\n",
984 pci_name(rhp->rdev.lldi.pdev));
985 rhp->rdev.flags = T4_FATAL_ERROR;
986 ret = -EIO;
987 } else {
988 ret = wr_wait.ret;
989 if (ret)
990 printk(KERN_WARNING MOD
991 "%s: Abnormal close qpid %d ret %u\n",
992 pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
993 ret);
994 }
995out:
996 PDBG("%s ret %d\n", __func__, ret);
997 return ret;
998}
999
1000static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
1001{
1002 memset(&init->u, 0, sizeof init->u);
1003 switch (p2p_type) {
1004 case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
1005 init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
1006 init->u.write.stag_sink = cpu_to_be32(1);
1007 init->u.write.to_sink = cpu_to_be64(1);
1008 init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
1009 init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
1010 sizeof(struct fw_ri_immd),
1011 16);
1012 break;
1013 case FW_RI_INIT_P2PTYPE_READ_REQ:
1014 init->u.write.opcode = FW_RI_RDMA_READ_WR;
1015 init->u.read.stag_src = cpu_to_be32(1);
1016 init->u.read.to_src_lo = cpu_to_be32(1);
1017 init->u.read.stag_sink = cpu_to_be32(1);
1018 init->u.read.to_sink_lo = cpu_to_be32(1);
1019 init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
1020 break;
1021 }
1022}
1023
1024static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1025{
1026 struct fw_ri_wr *wqe;
1027 int ret;
1028 struct c4iw_wr_wait wr_wait;
1029 struct sk_buff *skb;
1030
1031 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
1032 qhp->ep->hwtid);
1033
1034 skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
1035 if (!skb)
1036 return -ENOMEM;
1037 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1038
1039 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
1040 memset(wqe, 0, sizeof *wqe);
1041 wqe->op_compl = cpu_to_be32(
1042 FW_WR_OP(FW_RI_INIT_WR) |
1043 FW_WR_COMPL(1));
1044 wqe->flowid_len16 = cpu_to_be32(
1045 FW_WR_FLOWID(qhp->ep->hwtid) |
1046 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
1047
1048 wqe->cookie = (u64)&wr_wait;
1049
1050 wqe->u.init.type = FW_RI_TYPE_INIT;
1051 wqe->u.init.mpareqbit_p2ptype =
1052 V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
1053 V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
1054 wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
1055 if (qhp->attr.mpa_attr.recv_marker_enabled)
1056 wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
1057 if (qhp->attr.mpa_attr.xmit_marker_enabled)
1058 wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
1059 if (qhp->attr.mpa_attr.crc_enabled)
1060 wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
1061
1062 wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
1063 FW_RI_QP_RDMA_WRITE_ENABLE |
1064 FW_RI_QP_BIND_ENABLE;
1065 if (!qhp->ibqp.uobject)
1066 wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
1067 FW_RI_QP_STAG0_ENABLE;
1068 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1069 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1070 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1071 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1072 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1073 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1074 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1075 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1076 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1077 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
1078 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
1079 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1080 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1081 rhp->rdev.lldi.vr->rq.start);
1082 if (qhp->attr.mpa_attr.initiator)
1083 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1084
1085 c4iw_init_wr_wait(&wr_wait);
1086 ret = c4iw_ofld_send(&rhp->rdev, skb);
1087 if (ret)
1088 goto out;
1089
1090 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
1091 if (!wr_wait.done) {
1092 printk(KERN_ERR MOD "Device %s not responding!\n",
1093 pci_name(rhp->rdev.lldi.pdev));
1094 rhp->rdev.flags = T4_FATAL_ERROR;
1095 ret = -EIO;
1096 } else
1097 ret = wr_wait.ret;
1098out:
1099 PDBG("%s ret %d\n", __func__, ret);
1100 return ret;
1101}
1102
1103int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1104 enum c4iw_qp_attr_mask mask,
1105 struct c4iw_qp_attributes *attrs,
1106 int internal)
1107{
1108 int ret = 0;
1109 struct c4iw_qp_attributes newattr = qhp->attr;
1110 unsigned long flag;
1111 int disconnect = 0;
1112 int terminate = 0;
1113 int abort = 0;
1114 int free = 0;
1115 struct c4iw_ep *ep = NULL;
1116
1117 PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
1118 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
1119 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
1120
1121 spin_lock_irqsave(&qhp->lock, flag);
1122
1123 /* Process attr changes if in IDLE */
1124 if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
1125 if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1126 ret = -EIO;
1127 goto out;
1128 }
1129 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
1130 newattr.enable_rdma_read = attrs->enable_rdma_read;
1131 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
1132 newattr.enable_rdma_write = attrs->enable_rdma_write;
1133 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
1134 newattr.enable_bind = attrs->enable_bind;
1135 if (mask & C4IW_QP_ATTR_MAX_ORD) {
 1136 if (attrs->max_ord > c4iw_max_read_depth) {
1137 ret = -EINVAL;
1138 goto out;
1139 }
1140 newattr.max_ord = attrs->max_ord;
1141 }
1142 if (mask & C4IW_QP_ATTR_MAX_IRD) {
 1143 if (attrs->max_ird > c4iw_max_read_depth) {
1144 ret = -EINVAL;
1145 goto out;
1146 }
1147 newattr.max_ird = attrs->max_ird;
1148 }
1149 qhp->attr = newattr;
1150 }
1151
1152 if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
1153 goto out;
1154 if (qhp->attr.state == attrs->next_state)
1155 goto out;
1156
1157 switch (qhp->attr.state) {
1158 case C4IW_QP_STATE_IDLE:
1159 switch (attrs->next_state) {
1160 case C4IW_QP_STATE_RTS:
1161 if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
1162 ret = -EINVAL;
1163 goto out;
1164 }
1165 if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
1166 ret = -EINVAL;
1167 goto out;
1168 }
1169 qhp->attr.mpa_attr = attrs->mpa_attr;
1170 qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1171 qhp->ep = qhp->attr.llp_stream_handle;
1172 qhp->attr.state = C4IW_QP_STATE_RTS;
1173
1174 /*
1175 * Ref the endpoint here and deref when we
1176 * disassociate the endpoint from the QP. This
1177 * happens in CLOSING->IDLE transition or *->ERROR
1178 * transition.
1179 */
1180 c4iw_get_ep(&qhp->ep->com);
1181 spin_unlock_irqrestore(&qhp->lock, flag);
1182 ret = rdma_init(rhp, qhp);
1183 spin_lock_irqsave(&qhp->lock, flag);
1184 if (ret)
1185 goto err;
1186 break;
1187 case C4IW_QP_STATE_ERROR:
1188 qhp->attr.state = C4IW_QP_STATE_ERROR;
1189 flush_qp(qhp, &flag);
1190 break;
1191 default:
1192 ret = -EINVAL;
1193 goto out;
1194 }
1195 break;
1196 case C4IW_QP_STATE_RTS:
1197 switch (attrs->next_state) {
1198 case C4IW_QP_STATE_CLOSING:
1199 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
1200 qhp->attr.state = C4IW_QP_STATE_CLOSING;
1201 if (!internal) {
1202 abort = 0;
1203 disconnect = 1;
1204 ep = qhp->ep;
1205 c4iw_get_ep(&ep->com);
1206 }
1207 spin_unlock_irqrestore(&qhp->lock, flag);
1208 ret = rdma_fini(rhp, qhp);
1209 spin_lock_irqsave(&qhp->lock, flag);
1210 if (ret) {
1211 ep = qhp->ep;
1212 c4iw_get_ep(&ep->com);
1213 disconnect = abort = 1;
1214 goto err;
1215 }
1216 break;
1217 case C4IW_QP_STATE_TERMINATE:
1218 qhp->attr.state = C4IW_QP_STATE_TERMINATE;
1219 if (qhp->ibqp.uobject)
1220 t4_set_wq_in_error(&qhp->wq);
1221 ep = qhp->ep;
1222 c4iw_get_ep(&ep->com);
1223 terminate = 1;
1224 disconnect = 1;
1225 break;
1226 case C4IW_QP_STATE_ERROR:
1227 qhp->attr.state = C4IW_QP_STATE_ERROR;
1228 if (!internal) {
1229 abort = 1;
1230 disconnect = 1;
1231 ep = qhp->ep;
1232 c4iw_get_ep(&ep->com);
1233 }
1234 goto err;
1235 break;
1236 default:
1237 ret = -EINVAL;
1238 goto out;
1239 }
1240 break;
1241 case C4IW_QP_STATE_CLOSING:
1242 if (!internal) {
1243 ret = -EINVAL;
1244 goto out;
1245 }
1246 switch (attrs->next_state) {
1247 case C4IW_QP_STATE_IDLE:
1248 flush_qp(qhp, &flag);
1249 qhp->attr.state = C4IW_QP_STATE_IDLE;
1250 qhp->attr.llp_stream_handle = NULL;
1251 c4iw_put_ep(&qhp->ep->com);
1252 qhp->ep = NULL;
1253 wake_up(&qhp->wait);
1254 break;
1255 case C4IW_QP_STATE_ERROR:
1256 goto err;
1257 default:
1258 ret = -EINVAL;
1259 goto err;
1260 }
1261 break;
1262 case C4IW_QP_STATE_ERROR:
1263 if (attrs->next_state != C4IW_QP_STATE_IDLE) {
1264 ret = -EINVAL;
1265 goto out;
1266 }
1267 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
1268 ret = -EINVAL;
1269 goto out;
1270 }
1271 qhp->attr.state = C4IW_QP_STATE_IDLE;
1272 break;
1273 case C4IW_QP_STATE_TERMINATE:
1274 if (!internal) {
1275 ret = -EINVAL;
1276 goto out;
1277 }
1278 goto err;
1279 break;
1280 default:
1281 printk(KERN_ERR "%s in a bad state %d\n",
1282 __func__, qhp->attr.state);
1283 ret = -EINVAL;
1284 goto err;
1285 break;
1286 }
1287 goto out;
1288err:
1289 PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
1290 qhp->wq.sq.qid);
1291
1292 /* disassociate the LLP connection */
1293 qhp->attr.llp_stream_handle = NULL;
1294 ep = qhp->ep;
1295 qhp->ep = NULL;
1296 qhp->attr.state = C4IW_QP_STATE_ERROR;
1297 free = 1;
1298 wake_up(&qhp->wait);
1299 BUG_ON(!ep);
1300 flush_qp(qhp, &flag);
1301out:
1302 spin_unlock_irqrestore(&qhp->lock, flag);
1303
1304 if (terminate)
 1305 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
1306
1307 /*
1308 * If disconnect is 1, then we need to initiate a disconnect
1309 * on the EP. This can be a normal close (RTS->CLOSING) or
1310 * an abnormal close (RTS/CLOSING->ERROR).
1311 */
1312 if (disconnect) {
1313 c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
1314 GFP_KERNEL);
1315 c4iw_put_ep(&ep->com);
1316 }
1317
1318 /*
1319 * If free is 1, then we've disassociated the EP from the QP
1320 * and we need to dereference the EP.
1321 */
1322 if (free)
1323 c4iw_put_ep(&ep->com);
1324
1325 PDBG("%s exit state %d\n", __func__, qhp->attr.state);
1326 return ret;
1327}
1328
1329int c4iw_destroy_qp(struct ib_qp *ib_qp)
1330{
1331 struct c4iw_dev *rhp;
1332 struct c4iw_qp *qhp;
1333 struct c4iw_qp_attributes attrs;
1334 struct c4iw_ucontext *ucontext;
1335
1336 qhp = to_c4iw_qp(ib_qp);
1337 rhp = qhp->rhp;
1338
1339 attrs.next_state = C4IW_QP_STATE_ERROR;
1340 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1341 wait_event(qhp->wait, !qhp->ep);
1342
1343 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1344 remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
1345 atomic_dec(&qhp->refcnt);
1346 wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
1347
1348 ucontext = ib_qp->uobject ?
1349 to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
1350 destroy_qp(&rhp->rdev, &qhp->wq,
1351 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1352
1353 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
1354 kfree(qhp);
1355 return 0;
1356}
1357
1358struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1359 struct ib_udata *udata)
1360{
1361 struct c4iw_dev *rhp;
1362 struct c4iw_qp *qhp;
1363 struct c4iw_pd *php;
1364 struct c4iw_cq *schp;
1365 struct c4iw_cq *rchp;
1366 struct c4iw_create_qp_resp uresp;
1367 int sqsize, rqsize;
1368 struct c4iw_ucontext *ucontext;
1369 int ret;
1370 struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;
1371
1372 PDBG("%s ib_pd %p\n", __func__, pd);
1373
1374 if (attrs->qp_type != IB_QPT_RC)
1375 return ERR_PTR(-EINVAL);
1376
1377 php = to_c4iw_pd(pd);
1378 rhp = php->rhp;
1379 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
1380 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
1381 if (!schp || !rchp)
1382 return ERR_PTR(-EINVAL);
1383
1384 if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
1385 return ERR_PTR(-EINVAL);
1386
1387 rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
1388 if (rqsize > T4_MAX_RQ_SIZE)
1389 return ERR_PTR(-E2BIG);
1390
1391 sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
1392 if (sqsize > T4_MAX_SQ_SIZE)
1393 return ERR_PTR(-E2BIG);
1394
1395 ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
1396
1397
1398 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
1399 if (!qhp)
1400 return ERR_PTR(-ENOMEM);
1401 qhp->wq.sq.size = sqsize;
1402 qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
1403 qhp->wq.rq.size = rqsize;
1404 qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
1405
1406 if (ucontext) {
1407 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
1408 qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
1409 }
1410
1411 PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
1412 __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);
1413
1414 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
1415 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1416 if (ret)
1417 goto err1;
1418
1419 attrs->cap.max_recv_wr = rqsize - 1;
1420 attrs->cap.max_send_wr = sqsize - 1;
1421 attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
1422
1423 qhp->rhp = rhp;
1424 qhp->attr.pd = php->pdid;
1425 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
1426 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
1427 qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
1428 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
1429 qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
1430 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
1431 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
1432 qhp->attr.state = C4IW_QP_STATE_IDLE;
1433 qhp->attr.next_state = C4IW_QP_STATE_IDLE;
1434 qhp->attr.enable_rdma_read = 1;
1435 qhp->attr.enable_rdma_write = 1;
1436 qhp->attr.enable_bind = 1;
1437 qhp->attr.max_ord = 1;
1438 qhp->attr.max_ird = 1;
1439 spin_lock_init(&qhp->lock);
1440 init_waitqueue_head(&qhp->wait);
1441 atomic_set(&qhp->refcnt, 1);
1442
1443 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1444 if (ret)
1445 goto err2;
1446
1447 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.rq.qid);
1448 if (ret)
1449 goto err3;
1450
1451 if (udata) {
1452 mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
1453 if (!mm1) {
1454 ret = -ENOMEM;
1455 goto err4;
1456 }
1457 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
1458 if (!mm2) {
1459 ret = -ENOMEM;
1460 goto err5;
1461 }
1462 mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
1463 if (!mm3) {
1464 ret = -ENOMEM;
1465 goto err6;
1466 }
1467 mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
1468 if (!mm4) {
1469 ret = -ENOMEM;
1470 goto err7;
1471 }
1472
1473 uresp.qid_mask = rhp->rdev.qpmask;
1474 uresp.sqid = qhp->wq.sq.qid;
1475 uresp.sq_size = qhp->wq.sq.size;
1476 uresp.sq_memsize = qhp->wq.sq.memsize;
1477 uresp.rqid = qhp->wq.rq.qid;
1478 uresp.rq_size = qhp->wq.rq.size;
1479 uresp.rq_memsize = qhp->wq.rq.memsize;
1480 spin_lock(&ucontext->mmap_lock);
1481 uresp.sq_key = ucontext->key;
1482 ucontext->key += PAGE_SIZE;
1483 uresp.rq_key = ucontext->key;
1484 ucontext->key += PAGE_SIZE;
1485 uresp.sq_db_gts_key = ucontext->key;
1486 ucontext->key += PAGE_SIZE;
1487 uresp.rq_db_gts_key = ucontext->key;
1488 ucontext->key += PAGE_SIZE;
1489 spin_unlock(&ucontext->mmap_lock);
1490 ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
1491 if (ret)
1492 goto err8;
1493 mm1->key = uresp.sq_key;
1494 mm1->addr = virt_to_phys(qhp->wq.sq.queue);
1495 mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
1496 insert_mmap(ucontext, mm1);
1497 mm2->key = uresp.rq_key;
1498 mm2->addr = virt_to_phys(qhp->wq.rq.queue);
1499 mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
1500 insert_mmap(ucontext, mm2);
1501 mm3->key = uresp.sq_db_gts_key;
1502 mm3->addr = qhp->wq.sq.udb;
1503 mm3->len = PAGE_SIZE;
1504 insert_mmap(ucontext, mm3);
1505 mm4->key = uresp.rq_db_gts_key;
1506 mm4->addr = qhp->wq.rq.udb;
1507 mm4->len = PAGE_SIZE;
1508 insert_mmap(ucontext, mm4);
1509 }
1510 qhp->ibqp.qp_num = qhp->wq.sq.qid;
1511 init_timer(&(qhp->timer));
1512 PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
1513 __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
1514 qhp->wq.sq.qid);
1515 return &qhp->ibqp;
1516err8:
1517 kfree(mm4);
1518err7:
1519 kfree(mm3);
1520err6:
1521 kfree(mm2);
1522err5:
1523 kfree(mm1);
1524err4:
1525 remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
1526err3:
1527 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1528err2:
1529 destroy_qp(&rhp->rdev, &qhp->wq,
1530 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1531err1:
1532 kfree(qhp);
1533 return ERR_PTR(ret);
1534}
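For user QPs, the four *_key values handed back in c4iw_create_qp_resp are page-aligned tokens that the userspace provider passes back as mmap offsets on the uverbs fd; the driver's mmap handler matches them against the insert_mmap() entries set up above. A hedged userspace-side sketch of consuming them (struct and field names taken from the response built here, everything else assumed):

#include <sys/mman.h>

static int map_qp_resources(int cmd_fd, struct c4iw_create_qp_resp *resp,
			    long page_size, void **sq, void **rq,
			    void **sq_db, void **rq_db)
{
	*sq = mmap(NULL, resp->sq_memsize, PROT_READ | PROT_WRITE,
		   MAP_SHARED, cmd_fd, resp->sq_key);
	*rq = mmap(NULL, resp->rq_memsize, PROT_READ | PROT_WRITE,
		   MAP_SHARED, cmd_fd, resp->rq_key);
	*sq_db = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
		      cmd_fd, resp->sq_db_gts_key);
	*rq_db = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
		      cmd_fd, resp->rq_db_gts_key);

	if (*sq == MAP_FAILED || *rq == MAP_FAILED ||
	    *sq_db == MAP_FAILED || *rq_db == MAP_FAILED)
		return -1;
	return 0;
}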
1535
1536int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1537 int attr_mask, struct ib_udata *udata)
1538{
1539 struct c4iw_dev *rhp;
1540 struct c4iw_qp *qhp;
1541 enum c4iw_qp_attr_mask mask = 0;
1542 struct c4iw_qp_attributes attrs;
1543
1544 PDBG("%s ib_qp %p\n", __func__, ibqp);
1545
1546 /* iwarp does not support the RTR state */
1547 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
1548 attr_mask &= ~IB_QP_STATE;
1549
1550 /* Make sure we still have something left to do */
1551 if (!attr_mask)
1552 return 0;
1553
1554 memset(&attrs, 0, sizeof attrs);
1555 qhp = to_c4iw_qp(ibqp);
1556 rhp = qhp->rhp;
1557
1558 attrs.next_state = c4iw_convert_state(attr->qp_state);
1559 attrs.enable_rdma_read = (attr->qp_access_flags &
1560 IB_ACCESS_REMOTE_READ) ? 1 : 0;
1561 attrs.enable_rdma_write = (attr->qp_access_flags &
1562 IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1563 attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
1564
1565
1566 mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
1567 mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
1568 (C4IW_QP_ATTR_ENABLE_RDMA_READ |
1569 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
1570 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
1571
1572 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
1573}
1574
1575struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
1576{
1577 PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
1578 return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
1579}