/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iw_cxgb4.h"

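/*
 * Free the host-side resources of a work queue pair: the DMA-coherent
 * SQ and RQ rings, the RQT entries, the software shadow rings, and
 * both qpids.
 */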
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->sq.memsize, wq->sq.queue,
			  dma_unmap_addr(&wq->sq, mapping));
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}

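/*
 * Allocate the resources for a work queue pair: qpids for the SQ and
 * RQ, software shadow rings (kernel QPs only), RQT space, and the
 * DMA-coherent rings themselves, then write both EQ contexts to the
 * firmware with a single FW_RI_RES_WR and sleep until it completes
 * (or C4IW_WR_TO expires).
 */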
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid)
		goto err1;

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				       GFP_KERNEL);
		if (!wq->sq.sw_sq)
			goto err2;

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				       GFP_KERNEL);
		if (!wq->rq.sw_rq)
			goto err3;
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr)
		goto err4;

	wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->sq.memsize, &(wq->sq.dma_addr),
					  GFP_KERNEL);
	if (!wq->sq.queue)
		goto err5;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue)
		goto err6;
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
	     __func__, wq->sq.queue,
	     (unsigned long long)virt_to_phys(wq->sq.queue),
	     wq->rq.queue,
	     (unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			     (wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			     (wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb) {
		ret = -ENOMEM;
		goto err7;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (u64)&wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(3) |
		V_FW_RI_RES_WR_FBMAX(3) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(3) |
		V_FW_RI_RES_WR_FBMAX(3) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err7;
	wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
	if (!wr_wait.done) {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(rdev->lldi.pdev));
		rdev->flags = T4_FATAL_ERROR;
		ret = -EIO;
	} else
		ret = wr_wait.ret;
	if (ret)
		goto err7;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
err7:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
err6:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->sq.memsize, wq->sq.queue,
			  dma_unmap_addr(&wq->sq, mapping));
err5:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
	kfree(wq->rq.sw_rq);
err3:
	kfree(wq->sq.sw_sq);
err2:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
err1:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return -ENOMEM;
}

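/*
 * Build a FW_RI_SEND_WR in the WQE slot from an IB_WR_SEND or
 * IB_WR_SEND_WITH_INV request.  The payload is copied inline as
 * immediate data when IB_SEND_INLINE is set, otherwise it is
 * described by an immediate SGL.  *len16 returns the WR length in
 * 16-byte units.
 */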
static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	int i;
	u32 plen;
	int size;
	u8 *datap;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;

	default:
		return -EINVAL;
	}
	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			datap = (u8 *)wqe->send.u.immd_src[0].data;
			for (i = 0; i < wr->num_sge; i++) {
				if ((plen + wr->sg_list[i].length) >
				    T4_MAX_SEND_INLINE) {
					return -EMSGSIZE;
				}
				plen += wr->sg_list[i].length;
				memcpy(datap,
				     (void *)(unsigned long)wr->sg_list[i].addr,
				     wr->sg_list[i].length);
				datap += wr->sg_list[i].length;
			}
			wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
			wqe->send.u.immd_src[0].r1 = 0;
			wqe->send.u.immd_src[0].r2 = 0;
			wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			for (i = 0; i < wr->num_sge; i++) {
				if ((plen + wr->sg_list[i].length) < plen)
					return -EMSGSIZE;
				plen += wr->sg_list[i].length;
				wqe->send.u.isgl_src[0].sge[i].stag =
					cpu_to_be32(wr->sg_list[i].lkey);
				wqe->send.u.isgl_src[0].sge[i].len =
					cpu_to_be32(wr->sg_list[i].length);
				wqe->send.u.isgl_src[0].sge[i].to =
					cpu_to_be64(wr->sg_list[i].addr);
			}
			wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
			wqe->send.u.isgl_src[0].r1 = 0;
			wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
			wqe->send.u.isgl_src[0].r2 = 0;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

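/*
 * Build a FW_RI_RDMA_WRITE_WR.  Same inline-vs-SGL layout rules as
 * build_rdma_send(), plus the sink stag and remote address.
 */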
static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	int i;
	u32 plen;
	int size;
	u8 *datap;

	if (wr->num_sge > T4_MAX_WRITE_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			datap = (u8 *)wqe->write.u.immd_src[0].data;
			for (i = 0; i < wr->num_sge; i++) {
				if ((plen + wr->sg_list[i].length) >
				    T4_MAX_WRITE_INLINE) {
					return -EMSGSIZE;
				}
				plen += wr->sg_list[i].length;
				memcpy(datap,
				     (void *)(unsigned long)wr->sg_list[i].addr,
				     wr->sg_list[i].length);
				datap += wr->sg_list[i].length;
			}
			wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
			wqe->write.u.immd_src[0].r1 = 0;
			wqe->write.u.immd_src[0].r2 = 0;
			wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			for (i = 0; i < wr->num_sge; i++) {
				if ((plen + wr->sg_list[i].length) < plen)
					return -EMSGSIZE;
				plen += wr->sg_list[i].length;
				wqe->write.u.isgl_src[0].sge[i].stag =
					cpu_to_be32(wr->sg_list[i].lkey);
				wqe->write.u.isgl_src[0].sge[i].len =
					cpu_to_be32(wr->sg_list[i].length);
				wqe->write.u.isgl_src[0].sge[i].to =
					cpu_to_be64(wr->sg_list[i].addr);
			}
			wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
			wqe->write.u.isgl_src[0].r1 = 0;
			wqe->write.u.isgl_src[0].nsge =
				cpu_to_be16(wr->num_sge);
			wqe->write.u.isgl_src[0].r2 = 0;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

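/*
 * Build a FW_RI_RDMA_READ_WR.  Only a single local SGE is supported;
 * a request with no SGE is emitted as a 0B read using stag 2 on both
 * source and sink.
 */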
static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}

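/*
 * Fill the receive WQE's immediate SGL from the posted scatter list,
 * zeroing the unused slots up to T4_MAX_RECV_SGE.
 */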
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int i;
	int plen = 0;

	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += wr->sg_list[i].length;
		wqe->recv.isgl.sge[i].stag =
			cpu_to_be32(wr->sg_list[i].lkey);
		wqe->recv.isgl.sge[i].len =
			cpu_to_be32(wr->sg_list[i].length);
		wqe->recv.isgl.sge[i].to =
			cpu_to_be64(wr->sg_list[i].addr);
	}
	for (; i < T4_MAX_RECV_SGE; i++) {
		wqe->recv.isgl.sge[i].stag = 0;
		wqe->recv.isgl.sge[i].len = 0;
		wqe->recv.isgl.sge[i].to = 0;
	}
	wqe->recv.isgl.op = FW_RI_DATA_ISGL;
	wqe->recv.isgl.r1 = 0;
	wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
	wqe->recv.isgl.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

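/*
 * Build a FW_RI_FR_NSMR_WR for a fast-register work request.  Page
 * lists up to T4_MAX_FR_IMMD bytes are carried as immediate data in
 * the WQE; larger lists are referenced via a one-entry DSGL pointing
 * at the DMA-mapped page list.
 */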
static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);

	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
					0xffffffff);
	if (pbllen > T4_MAX_FR_IMMD) {
		struct c4iw_fr_page_list *c4pl =
			to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
		struct fw_ri_dsgl *sglp;

		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->r1 = 0;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
		sglp->len0 = cpu_to_be32(pbllen);

		*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
	} else {
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->r1 = 0;
		imdp->r2 = 0;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
			*p = cpu_to_be64(
				(u64)wr->wr.fast_reg.page_list->page_list[i]);
		*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
				      16);
	}
	return 0;
}

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));
}

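/*
 * Post a chain of send work requests.  Each WR is built directly into
 * the next SQ slot and mirrored in the software SQ for completion
 * processing; the doorbell is rung once for the entire chain.  Fails
 * with -EINVAL if the WQ is in error and -ENOMEM if the SQ is full.
 */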
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
				fw_flags |= FW_RI_RDMA_READ_INVALIDATE;
			else
				fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(wqe, wr, &len16);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq);
		idx++;
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_sq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

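/*
 * Post a chain of receive work requests to the RQ, recording each
 * wr_id in the software RQ and ringing the doorbell once at the end.
 */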
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		if (len16 < 5)
			wqe->flits[8] = 0;

		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq);
		wr = wr->next;
		num_wrs--;
		idx++;
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_rq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
	return -ENOSYS;
}

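/*
 * Map the status of an error CQE to the layer/etype and error code
 * carried in an iWARP TERMINATE message (RDMAP, DDP or MPA layer).
 * A NULL err_cqe yields a local catastrophic error.
 */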
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

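/*
 * Post a zero-length RDMA read directly on the offload queue,
 * bypassing the SQ.  The WR targets stag 1 with plen 0, so it
 * completes without moving any data.
 */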
int c4iw_post_zb_read(struct c4iw_qp *qhp)
{
	union t4_wr *wqe;
	struct sk_buff *skb;
	u8 len16;

	PDBG("%s enter\n", __func__);
	skb = alloc_skb(40, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
	memset(wqe, 0, sizeof wqe->read);
	wqe->read.r2 = cpu_to_be64(0);
	wqe->read.stag_sink = cpu_to_be32(1);
	wqe->read.to_sink_hi = cpu_to_be32(0);
	wqe->read.to_sink_lo = cpu_to_be32(1);
	wqe->read.stag_src = cpu_to_be32(1);
	wqe->read.plen = cpu_to_be32(0);
	wqe->read.to_src_hi = cpu_to_be32(0);
	wqe->read.to_src_lo = cpu_to_be32(1);
	len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);

	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

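/*
 * Send a FW_RI_INIT_WR of type TERMINATE carrying the terminate
 * message derived from err_cqe.  Allocation failure is silently
 * ignored since this runs on error paths.
 */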
static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, gfp);
	if (!skb)
		return;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp, unsigned long *flag)
{
	int count;
	int flushed;

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
	/* take a ref on the qhp since we must release the lock */
	atomic_inc(&qhp->refcnt);
	spin_unlock_irqrestore(&qhp->lock, *flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, *flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&rchp->cq);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, *flag);
	if (flushed)
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, *flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&schp->cq);
	c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, *flag);
	if (flushed)
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);

	/* deref */
	if (atomic_dec_and_test(&qhp->refcnt))
		wake_up(&qhp->wait);

	spin_lock_irqsave(&qhp->lock, *flag);
}

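/*
 * For user QPs just mark the WQ and CQs in error; kernel QPs also
 * have their pending work flushed to the CQs via __flush_qp().
 */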
static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
{
	struct c4iw_cq *rchp, *schp;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	if (qhp->ibqp.uobject) {
		t4_set_wq_in_error(&qhp->wq);
		t4_set_cq_in_error(&rchp->cq);
		if (schp != rchp)
			t4_set_cq_in_error(&schp->cq);
		return;
	}
	__flush_qp(qhp, rchp, schp, flag);
}

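/*
 * Take the connection out of RDMA mode with a FW_RI_TYPE_FINI work
 * request and wait for the firmware to acknowledge it, marking the
 * device fatal if it times out.
 */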
static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
	wqe->cookie = (u64)&wr_wait;

	wqe->u.fini.type = FW_RI_TYPE_FINI;
	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
	if (!wr_wait.done) {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(rhp->rdev.lldi.pdev));
		rhp->rdev.flags = T4_FATAL_ERROR;
		ret = -EIO;
	} else {
		ret = wr_wait.ret;
		if (ret)
			printk(KERN_WARNING MOD
			       "%s: Abnormal close qpid %d ret %u\n",
			       pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
			       ret);
	}
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

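/*
 * Build the zero-length RTR message (a 0B write or 0B read, per the
 * negotiated p2p type) that the MPA initiator sends to enforce
 * connection startup ordering.
 */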
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
						   sizeof(struct fw_ri_immd),
						   16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}

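/*
 * Push the QP into RDMA mode with a FW_RI_TYPE_INIT work request
 * carrying the MPA attributes, QP capabilities, queue IDs, ORD/IRD
 * limits, initial sequence numbers and RQT base, then wait for the
 * firmware completion as in rdma_fini().
 */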
static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->cookie = (u64)&wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
		V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
		V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
			      FW_RI_QP_RDMA_WRITE_ENABLE |
			      FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
				       FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
					   rhp->rdev.lldi.vr->rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
	if (!wr_wait.done) {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(rhp->rdev.lldi.pdev));
		rhp->rdev.flags = T4_FATAL_ERROR;
		ret = -EIO;
	} else
		ret = wr_wait.ret;
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

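/*
 * The QP state machine.  Applies attribute changes (IDLE only) and
 * drives IDLE/RTS/CLOSING/TERMINATE/ERROR transitions, invoking
 * rdma_init()/rdma_fini(), flushes, terminate and disconnect as
 * required.  The qhp lock is dropped around blocking firmware calls.
 */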
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	unsigned long flag;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
	     (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	spin_lock_irqsave(&qhp->lock, flag);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			qhp->attr.state = C4IW_QP_STATE_RTS;

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			spin_unlock_irqrestore(&qhp->lock, flag);
			ret = rdma_init(rhp, qhp);
			spin_lock_irqsave(&qhp->lock, flag);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			qhp->attr.state = C4IW_QP_STATE_ERROR;
			flush_qp(qhp, &flag);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			qhp->attr.state = C4IW_QP_STATE_CLOSING;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&ep->com);
			}
			spin_unlock_irqrestore(&qhp->lock, flag);
			ret = rdma_fini(rhp, qhp);
			spin_lock_irqsave(&qhp->lock, flag);
			if (ret) {
				ep = qhp->ep;
				c4iw_get_ep(&ep->com);
				disconnect = abort = 1;
				goto err;
			}
			break;
		case C4IW_QP_STATE_TERMINATE:
			qhp->attr.state = C4IW_QP_STATE_TERMINATE;
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ep = qhp->ep;
			c4iw_get_ep(&ep->com);
			terminate = 1;
			disconnect = 1;
			break;
		case C4IW_QP_STATE_ERROR:
			qhp->attr.state = C4IW_QP_STATE_ERROR;
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp, &flag);
			qhp->attr.state = C4IW_QP_STATE_IDLE;
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		qhp->attr.state = C4IW_QP_STATE_IDLE;
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	ep = qhp->ep;
	qhp->ep = NULL;
	qhp->attr.state = C4IW_QP_STATE_ERROR;
	free = 1;
	wake_up(&qhp->wait);
	BUG_ON(!ep);
	flush_qp(qhp, &flag);
out:
	spin_unlock_irqrestore(&qhp->lock, flag);

	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
				   GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);

	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}

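/*
 * Destroy a QP: force it to ERROR, wait for the endpoint to be
 * disassociated and all references to drop, then release the
 * hardware and host resources.
 */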
int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;
	struct c4iw_ucontext *ucontext;

	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ?
		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
	kfree(qhp);
	return 0;
}

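/*
 * Create an RC QP.  Ring sizes are rounded up to multiples of 16
 * with one extra entry's worth of memory reserved, the hardware
 * queues come from create_qp(), and userspace QPs additionally get
 * four mmap keys so the library can map the SQ, the RQ and their
 * doorbell/GTS pages.
 */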
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
	if (rqsize > T4_MAX_RQ_SIZE)
		return ERR_PTR(-E2BIG);

	sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
	if (sqsize > T4_MAX_SQ_SIZE)
		return ERR_PTR(-E2BIG);

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;

	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
	     __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;
	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	if (ret)
		goto err2;

	if (udata) {
		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}
		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
		if (!mm3) {
			ret = -ENOMEM;
			goto err5;
		}
		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
		if (!mm4) {
			ret = -ENOMEM;
			goto err6;
		}

		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err7;
		mm1->key = uresp.sq_key;
		mm1->addr = virt_to_phys(qhp->wq.sq.queue);
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = virt_to_phys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = qhp->wq.sq.udb;
		mm3->len = PAGE_SIZE;
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = qhp->wq.rq.udb;
		mm4->len = PAGE_SIZE;
		insert_mmap(ucontext, mm4);
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
	     __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.sq.qid);
	return &qhp->ibqp;
err7:
	kfree(mm4);
err6:
	kfree(mm3);
err5:
	kfree(mm2);
err4:
	kfree(mm1);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}

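/*
 * Translate an ib_modify_qp() call into c4iw attribute masks and hand
 * it to c4iw_modify_qp().  The RTR state is stripped since iWARP has
 * no RTR.
 */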
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(C4IW_QP_ATTR_ENABLE_RDMA_READ |
			 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
			 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}